Example #1
int send_request_to_dom0(void) 
{
	struct as_request *ring_req;
	int notify;
	static int reqid = 9;
	
	ring_req = RING_GET_REQUEST(&(info.ring), info.ring.req_prod_pvt);
	ring_req->id = reqid;
	ring_req->operation = reqid;
	ring_req->status = reqid;
	
	printk(KERN_DEBUG "\nxen:DomU: Fill in IDX-%d, with id=%d, op=%d, st=%d",
		info.ring.req_prod_pvt, ring_req->id, ring_req->operation, ring_req->status);
	reqid++;
	info.ring.req_prod_pvt += 1;
	
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&(info.ring), notify);
	if (notify) {
		printk(KERN_DEBUG "\nxen: DomU: sent a req to Dom0");
		notify_remote_via_irq(info.irq);
	} else {
		printk(KERN_DEBUG "\nxen:DomU: No notify req to Dom0");
		notify_remote_via_irq(info.irq);
	}
	
	printk("...\n");
	return 0;
}
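Every frontend example on this page ends with the same push-then-maybe-notify idiom: the private producer index is published to the shared ring and notify_remote_via_irq() is called only when the macro reports that the peer may have gone idle. Below is a self-contained userspace sketch of the index arithmetic behind RING_PUSH_REQUESTS_AND_CHECK_NOTIFY in Xen's public io/ring.h; the shared-ring structure and memory barriers are omitted, and the function and values are invented for the demo.

/*
 * Notify only when the consumer's advertised req_event falls inside the
 * newly published range (old_prod, new_prod]; unsigned wraparound keeps the
 * comparison valid when the free-running indices overflow.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t RING_IDX;

static int push_and_check_notify(RING_IDX old_prod, RING_IDX new_prod,
                                 RING_IDX req_event)
{
    return (RING_IDX)(new_prod - req_event) < (RING_IDX)(new_prod - old_prod);
}

int main(void)
{
    /* The consumer slept after request 5 and asked to be woken at 6. */
    printf("%d\n", push_and_check_notify(5, 7, 6)); /* 1: kick the peer    */
    printf("%d\n", push_and_check_notify(7, 9, 6)); /* 0: it is already up */
    return 0;
}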
Example #2
/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct pciback_device as a parameter */
void pciback_do_op(void *data)
{
	struct pciback_device *pdev = data;
	struct pci_dev *dev;
	struct xen_pci_op *op = &pdev->sh_info->op;

	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else if (op->cmd == XEN_PCI_OP_conf_read)
		op->err = pciback_config_read(dev, op->offset, op->size,
					      &op->value);
	else if (op->cmd == XEN_PCI_OP_conf_write)
		op->err = pciback_config_write(dev, op->offset, op->size,
					       op->value);
	else
		op->err = XEN_PCI_ERR_not_implemented;

	/* Tell the driver domain that we're done. */ 
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
	test_and_schedule_op(pdev);
}
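test_and_schedule_op() is called above but not shown. A plausible sketch of that pairing, assuming the same _XEN_PCIF_active and _PDEVF_op_active bits used in this example (an illustration of the hand-off, not the driver's verbatim code):

/* Sketch: queue the op work only if the frontend has posted a request and
 * no operation is already in flight. Assumes a pdev->op_work item as in the
 * work_struct-based variant of this function shown later on this page. */
static void test_and_schedule_op(struct pciback_device *pdev)
{
	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags) &&
	    !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
		schedule_work(&pdev->op_work);
}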
Example #3
/*
 * Send an IPI from the current CPU to the destination CPU.
 */
void
ipi_pcpu(unsigned int cpu, int vector) 
{ 
        int irq;

	irq = pcpu_find(cpu)->pc_ipi_to_irq[vector];
	
        notify_remote_via_irq(irq); 
} 
Example #4
/* 
 * Notify dom0 that the queue we want to use is full; it should
 * respond by setting MSG_AFLAGS_QUEUEUNOTFULL in due course.
 */
inline void vnic_set_queue_full(netfront_accel_vnic *vnic)
{

	if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
			     (unsigned long *)&vnic->shared_page->aflags))
		notify_remote_via_irq(vnic->msg_channel_irq);
	else
		VPRINTK("queue full bit already set, not signalling\n");
}
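The test_and_set_bit() above means only the 0 -> 1 transition of the flag raises an event; further calls while the bit is still set stay silent, which is what the VPRINTK branch reports. A minimal, self-contained illustration of that idiom using C11 atomics (all names invented for the demo):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long aflags;   /* stands in for shared_page->aflags */
#define QUEUE_FULL_BIT 0

/* Returns 1 only on the 0 -> 1 transition, mirroring test_and_set_bit(). */
static int set_queue_full(void)
{
    unsigned long old = atomic_fetch_or(&aflags, 1UL << QUEUE_FULL_BIT);
    return !(old & (1UL << QUEUE_FULL_BIT));
}

int main(void)
{
    printf("first call notifies:  %d\n", set_queue_full()); /* 1 */
    printf("second call notifies: %d\n", set_queue_full()); /* 0 */
    return 0;
}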
Example #5
static void scsifront_do_request(struct vscsifrnt_info *info)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);
}
Example #6
/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct pciback_device as a parameter */
void pciback_do_op(struct work_struct *work)
{
	struct pciback_device *pdev = container_of(work, struct pciback_device, op_work);
	struct pci_dev *dev;
	struct xen_pci_op *op = &pdev->sh_info->op;

	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else
	{
		switch (op->cmd)
		{
			case XEN_PCI_OP_conf_read:
				op->err = pciback_config_read(dev,
					  op->offset, op->size, &op->value);
				break;
			case XEN_PCI_OP_conf_write:
				op->err = pciback_config_write(dev,
					  op->offset, op->size,	op->value);
				break;
#ifdef CONFIG_PCI_MSI
			case XEN_PCI_OP_enable_msi:
				op->err = pciback_enable_msi(pdev, dev, op);
				break;
			case XEN_PCI_OP_disable_msi:
				op->err = pciback_disable_msi(pdev, dev, op);
				break;
			case XEN_PCI_OP_enable_msix:
				op->err = pciback_enable_msix(pdev, dev, op);
				break;
			case XEN_PCI_OP_disable_msix:
				op->err = pciback_disable_msix(pdev, dev, op);
				break;
#endif
			default:
				op->err = XEN_PCI_ERR_not_implemented;
				break;
		}
	}
	/* Tell the driver domain that we're done. */ 
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. 
	*/
	test_and_schedule_op(pdev);
}
Example #7
static irqreturn_t input_handler(int rq, void *dev_id)
{
	struct xenkbd_info *info = dev_id;
	struct xenkbd_page *page = info->page;
	__u32 cons, prod;

	prod = page->in_prod;
	if (prod == page->in_cons)
		return IRQ_HANDLED;
	rmb();			/* ensure we see ring contents up to prod */
	for (cons = page->in_cons; cons != prod; cons++) {
		union xenkbd_in_event *event;
		struct input_dev *dev;
		event = &XENKBD_IN_RING_REF(page, cons);

		dev = info->ptr;
		switch (event->type) {
		case XENKBD_TYPE_MOTION:
			input_report_rel(dev, REL_X, event->motion.rel_x);
			input_report_rel(dev, REL_Y, event->motion.rel_y);
			if (event->motion.rel_z)
				input_report_rel(dev, REL_WHEEL,
						 -event->motion.rel_z);
			break;
		case XENKBD_TYPE_KEY:
			dev = NULL;
			if (test_bit(event->key.keycode, info->kbd->keybit))
				dev = info->kbd;
			if (test_bit(event->key.keycode, info->ptr->keybit))
				dev = info->ptr;
			if (dev)
				input_report_key(dev, event->key.keycode,
						 event->key.pressed);
			else
				printk(KERN_WARNING
				       "xenkbd: unhandled keycode 0x%x\n",
				       event->key.keycode);
			break;
		case XENKBD_TYPE_POS:
			input_report_abs(dev, ABS_X, event->pos.abs_x);
			input_report_abs(dev, ABS_Y, event->pos.abs_y);
			if (event->pos.rel_z)
				input_report_rel(dev, REL_WHEEL,
						 -event->pos.rel_z);
			break;
		}
		if (dev)
			input_sync(dev);
	}
	mb();			/* ensure we got ring contents */
	page->in_cons = cons;
	notify_remote_via_irq(info->irq);

	return IRQ_HANDLED;
}
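page->in_cons and page->in_prod above are free-running counters; XENKBD_IN_RING_REF reduces the index modulo the ring length, and the rmb()/mb() pair orders the slot reads against the index updates. A small self-contained sketch of the same consumer walk, with the barriers and real event types left out (all names and sizes invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define IN_RING_LEN 8                       /* power of two, as in xenkbd */

struct demo_page {
    uint32_t in_cons, in_prod;              /* free-running counters */
    int in[IN_RING_LEN];                    /* stand-in for the event slots */
};

static void consume(struct demo_page *page)
{
    uint32_t cons, prod = page->in_prod;

    for (cons = page->in_cons; cons != prod; cons++)
        printf("slot %u holds event %d\n",
               cons % IN_RING_LEN, page->in[cons % IN_RING_LEN]);

    page->in_cons = cons;                   /* hand the slots back */
}

int main(void)
{
    /* Producer has written events at indices 6..9 (slots 6, 7, 0, 1). */
    struct demo_page page = { .in_cons = 6, .in_prod = 10,
                              .in = { [6] = 1, [7] = 2, [0] = 3, [1] = 4 } };
    consume(&page);
    return 0;
}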
Example #8
static inline void flush_requests(struct ixpfront_info *info)
{
	int notify;

	//printk(KERN_ERR "%s: pushing requests\n", __FUNCTION__);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify) {
		notify_remote_via_irq(info->irq);
	}
}
Example #9
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
	/* TODO: we need to call vcpu_up here */
	if (unlikely(vector == ap_wakeup_vector)) {
		/* XXX
		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
		 * like x86. But don't want to modify it,
		 * keep it untouched.
		 */
		xen_smp_intr_init_early(cpu);

		xen_send_ipi(cpu, vector);
		/* vcpu_prepare_and_up(cpu); */
		return;
	}
#endif

	switch (vector) {
	case IA64_IPI_VECTOR:
		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
		break;
	case IA64_IPI_RESCHEDULE:
		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
		break;
	case IA64_CMCP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
		break;
	case IA64_CPEP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
		break;
	case IA64_TIMER_VECTOR: {
		/* this is used only once by check_sal_cache_flush()
		   at boot time */
		static int used = 0;
		if (!used) {
			xen_send_ipi(cpu, IA64_TIMER_VECTOR);
			used = 1;
			break;
		}
		/* fallthrough */
	}
	default:
		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
		       vector);
		notify_remote_via_irq(0); /* defaults to 0 irq */
		break;
	}
}
Example #10
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
	/* TODO: we need to call vcpu_up here */
	if (unlikely(vector == ap_wakeup_vector)) {
		/* XXX
		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
		 * like x86. But don't want to modify it,
		 * keep it untouched.
		 */
		xen_smp_intr_init_early(cpu);

		xen_send_ipi(cpu, vector);
		/* vcpu_prepare_and_up(cpu); */
		return;
	}
#endif

	switch (vector) {
	case IA64_IPI_VECTOR:
		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
		break;
	case IA64_IPI_RESCHEDULE:
		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
		break;
	case IA64_CMCP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
		break;
	case IA64_CPEP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
		break;
	case IA64_TIMER_VECTOR: {
		/* this is used only once by check_sal_cache_flush()
		   at boot time */
		static int used = 0;
		if (!used) {
			xen_send_ipi(cpu, IA64_TIMER_VECTOR);
			used = 1;
			break;
		}
		/* fallthrough */
	}
	default:
		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
		       vector);
		notify_remote_via_irq(0); /* defaults to 0 irq */
		break;
	}
}
Example #11
static int scsifront_do_request(struct vscsifrnt_info *info,
				struct vscsifrnt_shadow *shadow)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	struct scsi_cmnd *sc = shadow->sc;
	uint32_t id;
	int i, notify;

	if (RING_FULL(&info->ring))
		return -EBUSY;

	id = scsifront_get_rqid(info);	/* use id in response */
	if (id >= VSCSIIF_MAX_REQS)
		return -EBUSY;

	info->shadow[id] = shadow;
	shadow->rqid = id;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
	ring->req_prod_pvt++;

	ring_req->rqid        = id;
	ring_req->act         = shadow->act;
	ring_req->ref_rqid    = shadow->ref_rqid;
	ring_req->nr_segments = shadow->nr_segments;

	ring_req->id      = sc->device->id;
	ring_req->lun     = sc->device->lun;
	ring_req->channel = sc->device->channel;
	ring_req->cmd_len = sc->cmd_len;

	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
	ring_req->timeout_per_command = sc->request->timeout / HZ;

	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
		ring_req->seg[i] = shadow->seg[i];

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}
Example #12
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
    int irq;

#ifdef CONFIG_X86
    if (unlikely(vector == XEN_NMI_VECTOR)) {
        int rc =  HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
        if (rc < 0)
            printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
        return;
    }
#endif
    irq = per_cpu(ipi_to_irq, cpu)[vector];
    BUG_ON(irq < 0);
    notify_remote_via_irq(irq);
}
Example #13
static irqreturn_t as_int(int irq, void *dev_id)
{
    RING_IDX rc, rp;
    as_request_t req;
    as_response_t resp;
    int more_to_do, notify;
    printk(KERN_DEBUG "\nxen:Dom0: as_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG " rc = %d rp = %d", rc, rp);
    while (rc != rp) {
       if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
           break;
       memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
       resp.id = req.id;
       resp.operation = req.operation;
       resp.status = req.status + 1;
       printk(KERN_DEBUG "\nxen:Dom0:Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
       info.ring.req_cons = ++rc;
       barrier();
       switch(req.operation) {
          case 0:
              printk(KERN_DEBUG "\nxen:dom0:req.operation = 0");
              break;
          default:
              printk(KERN_DEBUG "\nxen:dom0:req.operation = %d", req.operation);
              break;
       }
      memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
      info.ring.rsp_prod_pvt++;
      RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
      if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
          RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
       } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
          more_to_do = 1;
       }
       if (notify) {
          printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
          notify_remote_via_irq(info.irq);
       }
    }
    return IRQ_HANDLED;
}
Example #14
static int connect(struct xenbus_device *dev)
{
	struct usbfront_info *info = dev->dev.driver_data;

	usbif_conn_request_t *req;
	int i, idx, err;
	int notify;
	char name[TASK_COMM_LEN];
	struct usb_hcd *hcd;

	hcd = info_to_hcd(info);
	snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);

	err = talk_to_backend(dev, info);
	if (err)
		return err;

	info->kthread = kthread_run(xenhcd_schedule, info, name);
	if (IS_ERR(info->kthread)) {
		err = PTR_ERR(info->kthread);
		info->kthread = NULL;
		xenbus_dev_fatal(dev, err, "Error creating thread");
		return err;
	}
	/* prepare ring for hotplug notification */
	for (idx = 0, i = 0; i < USB_CONN_RING_SIZE; i++) {
		req = RING_GET_REQUEST(&info->conn_ring, idx);
		req->id = idx;
		idx++;
	}
	info->conn_ring.req_prod_pvt = idx;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}
Example #15
File: netback.c  Project: 7799/linux
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worst-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimal for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		old_req_cons = vif->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
		ring_slots_used = vif->rx.req_cons - old_req_cons;

		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}
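The worst-case slot estimate in xenvif_rx_action() is plain ceiling division over page-sized chunks: one term for the linear head and one per fragment, each with its starting page offset folded in. A standalone illustration of the arithmetic with hypothetical sizes:

#include <stdio.h>

#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* Linear head: 1500 bytes starting 3000 bytes into its page. */
    unsigned int head_off = 3000, head_len = 1500;
    /* One fragment: 6000 bytes starting 500 bytes into its first page. */
    unsigned int frag_off = 500, frag_size = 6000;

    unsigned int slots = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE)   /* 2 */
                       + DIV_ROUND_UP(frag_off + frag_size, PAGE_SIZE); /* 2 */

    printf("worst-case slots: %u\n", slots);                            /* 4 */
    return 0;
}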

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
Example #16
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
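The fill-target adjustment above doubles np->rx_target whenever the number of still-outstanding requests (req_prod - rsp_prod) drops below a quarter of the current target, clamping the result at rx_max_target; note that the doubling happens as a side effect inside the condition. A standalone trace of that rule with hypothetical numbers:

#include <stdio.h>

int main(void)
{
    unsigned int rx_target = 64, rx_max_target = 256;
    unsigned int outstanding = 10;          /* req_prod - rsp_prod, made up */

    /* Same shape as the netfront test: the *= 2 runs whenever the first
     * clause is true, and the assignment only clamps an overshoot. */
    if (outstanding < rx_target / 4 && (rx_target *= 2) > rx_max_target)
        rx_target = rx_max_target;

    printf("new rx_target: %u\n", rx_target);   /* 128 */
    return 0;
}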
Example #17
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			
			if (i != 0)
				goto refill;
			
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		

	
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
Example #18
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err;
	RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
	printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
           	break;
       	memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
       	resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();
                
		printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
      	switch(resp.operation) {
            case CHRIF_OP_OPEN:
                info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
				printk(KERN_DEBUG "\nxen: dom0: response open");
                break;
            case CHRIF_OP_READ:{
				int cnt;
                resp.rdwr.len = req.rdwr.len;
				cnt = resp.rdwr.len/4096;
                printk(KERN_DEBUG "\nxen: dom0: read %d times", cnt);
				memset(op_page->addr, 0, 4096);
              if(rd_time == 0){
				old_fs = get_fs();
				set_fs(get_ds());
			    //read data from device to page 
				err =info.chrif_filp->f_op->read(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);	
				set_fs(old_fs);
                if(err < 0)
					printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
			  }	
                memcpy(op_page->addr, block_buf+rd_time*4096, 4096);
                rd_time++;
                if(rd_time == cnt){
                    rd_time = 0;
                    memset(block_buf, 0, resp.rdwr.len);
                }
                printk(KERN_DEBUG "\nxen: dom0: response read");
				break;
            }
            case CHRIF_OP_WRITE:{
				int count;
                resp.rdwr.len = req.rdwr.len;
				count = resp.rdwr.len/4096;
                printk(KERN_DEBUG "\nxen: dom0: write %d times", count);
                //if(count == 0){ block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);}
                memcpy(block_buf+wr_time*4096, op_page->addr, 4096);
                wr_time++;
              if(wr_time == count){
                old_fs = get_fs();
				set_fs(get_ds());
				//write data from page to device  
				err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);	
				set_fs(old_fs);
                wr_time = 0;
                if(err < 0)
					printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);
                memset(block_buf, 0, resp.rdwr.len);
			  }	
                //kfree(block_buf);
                printk(KERN_DEBUG "\nxen: dom0: response write");
                break;
            }    
			case CHRIF_OP_IOCTL:{
				resp.ioc_parm.cmd = req.ioc_parm.cmd;
				switch(resp.ioc_parm.cmd){
					case PDMA_IOC_START_DMA:{
						old_fs = get_fs();
						set_fs(get_ds());
						err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);	
						set_fs(old_fs);
					 	if(err){
							printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
							resp.status = 0;
						}else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
						//err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);	
                        get_block_info();
						break;
					}
					case PDMA_IOC_STOP_DMA:{
						//err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);	
						old_fs = get_fs();
						set_fs(get_ds());
						err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);	
						set_fs(old_fs);
					 	if(err){
							printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
							resp.status = 0;
						}else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
						break;
					}
					case PDMA_IOC_INFO:{
						struct pdma_info pdma_info;
						//err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);	
						old_fs = get_fs();
						set_fs(get_ds());
						err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);	
						set_fs(old_fs);
					 	if(err){
							printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
							resp.status = 0;
						}else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
						resp.ioc_parm.info = pdma_info;
						break;
					}
					case PDMA_IOC_STAT:{
						struct pdma_stat pdma_stat;
						//err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);	
						old_fs = get_fs();
						set_fs(get_ds());
						err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);	
						set_fs(old_fs);
					 	if(err){
							printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
							resp.status = 0;
						}else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
						resp.ioc_parm.stat = pdma_stat;
						break;
					}
					case PDMA_IOC_RW_REG:{
						struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
						//err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);	
						old_fs = get_fs();
						set_fs(get_ds());
						err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);	
						set_fs(old_fs);
					 	if(err){
							printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
							resp.status = 0;
						}else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
						resp.ioc_parm.ctrl = ctrl;
						break;
					}
                    default:
                        printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                        break;
				}
				printk(KERN_INFO "\nxen: Dom0: response ioctl");
				break;
			}
			case CHRIF_OP_CLOSE:
				filp_close(info.chrif_filp, NULL);
				printk(KERN_INFO "\nxen: Dom0: response close");
				break;
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                break;
        }

		memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
       	info.ring.rsp_prod_pvt++;
		//put response and check whether or not notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons) 
		{
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
       	}
		else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) 
		{
           	more_to_do = 1;
        }
        if (notify) 
		{
          	printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
          	notify_remote_via_irq(info.irq);
       	}
    }
    return IRQ_HANDLED;
}
Example #19
void xen_pcibk_do_op(struct work_struct *data)
{
	struct xen_pcibk_device *pdev =
		container_of(data, struct xen_pcibk_device, op_work);
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data = NULL;
	struct xen_pci_op *op = &pdev->op;
	int test_intx = 0;
#ifdef CONFIG_PCI_MSI
	unsigned int nr = 0;
#endif

	/* Work on a private copy of the shared request; the compiler barrier
	 * keeps later accesses from being refetched from fields the frontend
	 * could still be changing. */
	*op = pdev->sh_info->op;
	barrier();
	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			test_intx = dev_data->enable_intx;
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = xen_pcibk_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = xen_pcibk_config_write(dev,
				  op->offset, op->size,	op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = xen_pcibk_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = xen_pcibk_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			nr = op->value;
			op->err = xen_pcibk_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = xen_pcibk_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	if (!op->err && dev && dev_data) {
		/* Transition detected */
		if ((dev_data->enable_intx != test_intx))
			xen_pcibk_control_isr(dev, 0 /* no reset */);
	}
	pdev->sh_info->op.err = op->err;
	pdev->sh_info->op.value = op->value;
#ifdef CONFIG_PCI_MSI
	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
		unsigned int i;

		for (i = 0; i < nr; i++)
			pdev->sh_info->op.msix_entries[i].vector =
				op->msix_entries[i].vector;
	}
#endif
	/* Tell the driver domain that we're done. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
	*/
	xen_pcibk_test_and_schedule_op(pdev);
}
Example #20
static inline void __send_IPI_one(unsigned int cpu, int vector)
{
    int irq = per_cpu(ipi_to_irq, cpu)[vector];
    BUG_ON(irq < 0);
    notify_remote_via_irq(irq);
}
Example #21
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_alert_ratelimited(
			"xennet: skb rides the rocket: %d slots\n", slots);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id   = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
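xennet_can_sg() is used in the carrier/SG check above but is not part of this excerpt; in netfront it amounts to a feature-flag test. A likely one-line sketch, offered as an assumption based on NETIF_F_SG rather than as the verbatim source:

/* Assumed helper: scatter-gather transmit is usable only when the device
 * advertises NETIF_F_SG. */
static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}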
Example #22
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err;
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            resp.rdwr.len = req.rdwr.len;
            //struct pdma_info pdma_info;
            //memset(op_page->addr, 0, resp.rdwr.len);
            old_fs = get_fs();
            set_fs(get_ds());
            //get read size of block
            //err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
            //read data from device to page
            //err =info.chrif_filp->f_op->read(info.chrif_filp, op_page->addr, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
            if(err < 0)
                printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int i = 0, count, ret;
            struct vm_struct *op_page;
            struct gnttab_map_grant_ref op_page_ops;
            struct gnttab_unmap_grant_ref op_page_unmap_ops;
            resp.rdwr.len = req.rdwr.len;

            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: Dom0: write %u bytes %d times", resp.rdwr.len, count);

            block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);
            memset(block_buf, 0, resp.rdwr.len);

            while(i < count) {
                resp.op_gref[i] = req.op_gref[i];
                printk(KERN_DEBUG "\nxen: dom0: req.op_gref[0]: %d", resp.op_gref[i]);

                op_page = alloc_vm_area(PAGE_SIZE, NULL);
                if(op_page == 0) {
                    free_vm_area(op_page);
                    printk("\nxen: dom0: could not allocate shared_page");
                    return -EFAULT;
                }
                /*gnttab_set_map_op(&op_page_ops, (unsigned long)op_page->addr, GNTMAP_host_map, resp.op_gref[i], info.remoteDomain);

                 op_page_unmap_ops.host_addr = (unsigned long)(op_page->addr);
                 unmap_ops.handle = op_page_ops.handle;
                 if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op_page_ops, 1)){
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
                     return -EFAULT;
                 }
                 if (op_page_ops.status) {
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", op_page_ops.status);
                     return -EFAULT;
                 }
                 printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)op_page->addr, op_page_ops.handle, op_page_ops.status);

                 memcpy(block_buf+i*4096, op_page->addr, 4096);
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op_page_unmap_ops, 1);
                 if (ret == 0) {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame");
                 } else {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame failed");
                 }
                 free_vm_area(op_page);*/
                i++;
            }

            /*  old_fs = get_fs();
            set_fs(get_ds());
            //write data from page to device
            //err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
              if(err < 0)
            	printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);

              */ //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons)
        {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        }
        else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring))
        {
            more_to_do = 1;
        }
        if (notify)
        {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #23
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id   = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return 0;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}
Example #24
/**
 * Main handler for OpenXT PV input.
 */
static irqreturn_t input_handler(int rq, void *dev_id)
{
    __u32 cons, prod;

    //Get a reference to the device's information structure...
    struct openxt_kbd_info *info = dev_id;

    //... and get a reference to the shared page used for communications.
    struct xenkbd_page *page = info->page;

    //If we have the latest data from the ringbuffer, we're done!
    prod = page->in_prod;
    if (prod == page->in_cons)
        return IRQ_HANDLED;

    //Ensure that we always see the latest data.
    rmb();

    //For each outstanding event in the ringbuffer...
    for (cons = page->in_cons; cons != prod; cons++) {
        union oxtkbd_in_event *event;

        //Get a reference to the current event.
        event = &OXT_KBD_IN_RING_REF(page, cons);

        switch (event->type) {

        case OXT_KBD_TYPE_MOTION:
            __handle_relative_motion(info, event);
            break;

        case OXT_KBD_TYPE_KEY:
            __handle_key_or_button_press(info, event);
            break;

        case OXT_KBD_TYPE_POS:
            __handle_absolute_motion(info, event);
            break;

        case OXT_KBD_TYPE_TOUCH_DOWN:
            __handle_touch_down(info, event);
            __handle_touch_movement(info, event, false, false);
              break;

        case OXT_KBD_TYPE_TOUCH_UP:
            __handle_touch_up(info, event);
            break;

        case OXT_KBD_TYPE_TOUCH_MOVE:
            __handle_touch_movement(info, event, true, false);
            break;

        case OXT_KBD_TYPE_TOUCH_FRAME:
            __handle_touch_framing(info, event);
            break;

        }
    }

    //Free the relevant space in the ringbuffer...
    mb();
    page->in_cons = cons;

    //... and signal to the other side that we're ready
    //for more data.
    notify_remote_via_irq(info->irq);
    return IRQ_HANDLED;
}