Example 1
/* The backend is now connected so complete the connection process on our side */
static int
pcifront_connect(struct pcifront_device *pdev)
{
	device_t nexus;
	devclass_t nexus_devclass;

	/* We will add our device as a child of the nexus0 device */
	if (!(nexus_devclass = devclass_find("nexus")) ||
		!(nexus = devclass_get_device(nexus_devclass, 0))) {
		WPRINTF("could not find nexus0!\n");
		return -1;
	}

	/* Create a newbus device representing this frontend instance */
	pdev->ndev = BUS_ADD_CHILD(nexus, 0, "xpcife", pdev->unit);
	if (!pdev->ndev) {
		WPRINTF("could not create xpcife%d!\n", pdev->unit);
		return -EFAULT;
	}
	get_pdev(pdev);
	device_set_ivars(pdev->ndev, pdev);

	/* We are connected; switch the xenbus state to Connected */
	xenbus_switch_state(pdev->xdev, NULL, XenbusStateConnected);

	printf("pcifront: connected to %s\n", pdev->xdev->nodename);

	mtx_lock(&Giant);
	device_probe_and_attach(pdev->ndev);
	mtx_unlock(&Giant);

	return 0;
}
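
For context, here is a minimal sketch of the newbus glue that would claim the "xpcife" child created above. The method names and probe return value are illustrative assumptions, not the actual pcifront attachment code.

static int
xpcife_probe(device_t dev)
{
	device_set_desc(dev, "Xen PCI frontend");
	/* The child is added explicitly by name, so no wildcard probing */
	return (BUS_PROBE_NOWILDCARD);
}

static int
xpcife_attach(device_t dev)
{
	/* The ivars set by pcifront_connect() identify our instance */
	struct pcifront_device *pdev = device_get_ivars(dev);

	/* ... enumerate the virtual PCI bus exposed by the backend ... */
	return (0);
}

static device_method_t xpcife_methods[] = {
	DEVMETHOD(device_probe,		xpcife_probe),
	DEVMETHOD(device_attach,	xpcife_attach),
	DEVMETHOD_END
};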
Example 2
    bool Task::start_prep()
    {
        /* Set pthread attributes.
        */
        if(pthread_attr_setstacksize(&(m_impl->attr), m_props.stack_size) !=
           0) {
            EPRINTF("%s:Failed setting task stack size\n", m_name);
            return false;
        }
        if(pthread_attr_setinheritsched(&(m_impl->attr),
                                        PTHREAD_EXPLICIT_SCHED) != 0) {
            EPRINTF("%s:Failed setting task schedule inheritance policy\n",
                    m_name);
            return false;
        }

        /* Call the task's initialization routine.
        */
        if(false == init()) {
            WPRINTF("%s task returned false at initialization and will not be "
                    "scheduled.\n",
                    m_name);
            return false;
        }

        return true;
    }
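
The two calls above assume the pthread attribute object was initialized earlier. For completeness, a minimal, self-contained C sketch of the full sequence (the helper and parameter names are illustrative, not from this codebase):

#include <pthread.h>

static int
spawn_task(pthread_t *tid, void *(*entry)(void *), void *arg, size_t stack)
{
	pthread_attr_t attr;
	int rc;

	if (pthread_attr_init(&attr) != 0)
		return -1;
	/* Explicit scheduling and a caller-chosen stack, as in start_prep() */
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setstacksize(&attr, stack);

	rc = pthread_create(tid, &attr, entry, arg);
	pthread_attr_destroy(&attr);
	return (rc == 0) ? 0 : -1;
}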
Example 3
static void
pci_vtnet_tap_setup(struct pci_vtnet_softc *sc, char *devname)
{
	char tbuf[80];
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
#endif

	strcpy(tbuf, "/dev/");
	strlcat(tbuf, devname, sizeof(tbuf));

	sc->pci_vtnet_rx = pci_vtnet_tap_rx;
	sc->pci_vtnet_tx = pci_vtnet_tap_tx;

	sc->vsc_tapfd = open(tbuf, O_RDWR);
	if (sc->vsc_tapfd == -1) {
		WPRINTF(("open of tap device %s failed\n", tbuf));
		return;
	}

	/*
	 * Set non-blocking and register for read
	 * notifications with the event loop
	 */
	int opt = 1;
	if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
		WPRINTF(("tap device O_NONBLOCK failed\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
		/* Bail out: the Capsicum and mevent code below needs a valid fd */
		return;
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
	if (caph_rights_limit(sc->vsc_tapfd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
	}
}
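
mevent_add() hands the descriptor to bhyve's event loop, which invokes the callback whenever the tap becomes readable. A hedged sketch of the callback shape (the real pci_vtnet_rx_callback does more work):

static void
example_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	/* The fd was made non-blocking with FIONBIO above, so the rx
	 * routine can drain it until read() would block. */
	sc->pci_vtnet_rx(sc);
}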
Example 4
/* Write to the xenbus info needed by backend */
static int
pcifront_publish_info(struct pcifront_device *pdev)
{
	int err = 0;
	struct xenbus_transaction *trans;

	err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
	if (err < 0) {
		WPRINTF("error granting access to ring page\n");
		goto out;
	}

	pdev->gnt_ref = err;

	err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
	if (err)
		goto out;

 do_publish:
	trans = xenbus_transaction_start();
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);	/* report the failure, not the grant ref */
		xenbus_dev_fatal(pdev->xdev, err,
						 "Error writing configuration for backend "
						 "(start transaction)");
		goto out;
	}

	err = xenbus_printf(trans, pdev->xdev->nodename,
						"pci-op-ref", "%u", pdev->gnt_ref);
	if (!err)
		err = xenbus_printf(trans, pdev->xdev->nodename,
							"event-channel", "%u", pdev->evtchn);
	if (!err)
		err = xenbus_printf(trans, pdev->xdev->nodename,
							"magic", XEN_PCI_MAGIC);
	if (!err)
		err = xenbus_switch_state(pdev->xdev, trans,
								  XenbusStateInitialised);

	if (err) {
		xenbus_transaction_end(trans, 1);
		xenbus_dev_fatal(pdev->xdev, err,
						 "Error writing configuration for backend");
		goto out;
	} else {
		err = xenbus_transaction_end(trans, 0);
		if (err == -EAGAIN)
			goto do_publish;
		else if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
							 "Error completing transaction for backend");
			goto out;
		}
	}

 out:
	return err;
}
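
The do_publish label implements the standard xenbus transaction idiom: if another writer races with the transaction, xenbus_transaction_end() fails with -EAGAIN and the whole transaction must be replayed from the start. Distilled into a standalone sketch (the helper name and the xenbus_device type are assumptions based on the calls above):

static int
xenbus_write_retry(struct xenbus_device *dev, const char *key,
				   const char *val)
{
	struct xenbus_transaction *trans;
	int err;

 again:
	trans = xenbus_transaction_start();
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = xenbus_printf(trans, dev->nodename, key, "%s", val);
	if (err) {
		xenbus_transaction_end(trans, 1);	/* abort */
		return err;
	}

	err = xenbus_transaction_end(trans, 0);		/* commit */
	if (err == -EAGAIN)
		goto again;	/* lost a race; replay the transaction */
	return err;
}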
Example 5
static void
pci_vtnet_netmap_setup(struct pci_vtnet_softc *sc, char *ifname)
{
	sc->pci_vtnet_rx = pci_vtnet_netmap_rx;
	sc->pci_vtnet_tx = pci_vtnet_netmap_tx;

	sc->vsc_nmd = nm_open(ifname, NULL, 0, 0);
	if (sc->vsc_nmd == NULL) {
		WPRINTF(("open of netmap device %s failed\n", ifname));
		return;
	}

	sc->vsc_mevp = mevent_add(sc->vsc_nmd->fd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		nm_close(sc->vsc_nmd);
		sc->vsc_nmd = NULL;
	}
}
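
Unlike the tap path, nm_open() takes a netmap port name rather than a device node path. A hypothetical caller would pass names like these (the interface names are examples):

	pci_vtnet_netmap_setup(sc, "netmap:em0");	/* host NIC in netmap mode */
	pci_vtnet_netmap_setup(sc, "vale0:1");		/* port on a VALE software switch */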
Example 6
static int
pci_vtrnd_init(struct pci_devinst *pi, UNUSED char *opts)
{
	struct pci_vtrnd_softc *sc;
	int fd;
	int len;
	uint8_t v;

	/*
	 * Should always be able to open /dev/random.
	 */
	fd = open("/dev/random", O_RDONLY | O_NONBLOCK);

	assert(fd >= 0);

	/*
	 * Check that device is seeded and non-blocking.
	 */
	len = (int) read(fd, &v, sizeof(v));
	if (len <= 0) {
		WPRINTF(("vtrnd: /dev/random not ready, read(): %d\n", len));
		close(fd);	/* don't leak the descriptor on failure */
		return (1);
	}

	sc = calloc(1, sizeof(struct pci_vtrnd_softc));

	vi_softc_linkup(&sc->vrsc_vs, &vtrnd_vi_consts, sc, pi, &sc->vrsc_vq);
	sc->vrsc_vs.vs_mtx = &sc->vrsc_mtx;

	sc->vrsc_vq.vq_qsize = VTRND_RINGSZ;

	/* keep /dev/random opened while emulating */
	sc->vrsc_fd = fd;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_RANDOM);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_CRYPTO);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_ENTROPY);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vrsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vrsc_vs, 0);

	return (0);
}
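
For contrast with the init path, here is a condensed sketch of the queue-notify side of a virtio entropy device, i.e. what runs when the guest posts buffers (an assumption-laden simplification; the real pci_vtrnd_notify handles short reads and errors more carefully):

static void
example_vtrnd_notify(void *vsc, struct vqueue_info *vq)
{
	struct iovec iov;
	struct pci_vtrnd_softc *sc = vsc;
	uint16_t idx;
	int len;

	while (vq_has_descs(vq)) {
		vq_getchain(vq, &idx, &iov, 1, NULL);

		/* Fill the guest buffer from the fd kept open in init */
		len = (int) read(sc->vrsc_fd, iov.iov_base, iov.iov_len);

		vq_relchain(vq, idx, len);	/* return the chain to the guest */
	}
	vq_endchains(vq, 1);	/* generate an interrupt if needed */
}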
Example 7
/* Process PCI operation */
static int
do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
{
	int err = 0;
	struct xen_pci_op *active_op = &pdev->sh_info->op;
	evtchn_port_t port = pdev->evtchn;
	time_t timeout;

	mtx_lock(&pdev->sh_info_lock);

	memcpy(active_op, op, sizeof(struct xen_pci_op));

	/* Go */
	wmb();
	set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_evtchn(port);

	timeout = time_uptime + 2;

	clear_evtchn(port);

	/* Spin while waiting for the answer */
	while (test_bit
	       (_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) {
		/* Use a separate variable so the timeout path below can
		 * set the outer 'err'; shadowing it would lose the error. */
		int poll_err = HYPERVISOR_poll(&port, 1, 3 * hz);
		if (poll_err)
			panic("Failed HYPERVISOR_poll: err=%d", poll_err);
		clear_evtchn(port);
		if (time_uptime > timeout) {
			WPRINTF("pciback not responding!!!\n");
			clear_bit(_XEN_PCIF_active,
				  (unsigned long *)&pdev->sh_info->flags);
			err = XEN_PCI_ERR_dev_not_found;
			goto out;
		}
	}

	memcpy(op, active_op, sizeof(struct xen_pci_op));

	err = op->err;
 out:
	mtx_unlock(&pdev->sh_info_lock);
	return err;
}
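
A hypothetical caller of do_pci_op(), performing a 32-bit config-space read through the backend (the field names follow the xen_pci_op layout in the Xen PCI interface headers; the helper itself is illustrative):

static int
pcifront_conf_read32(struct pcifront_device *pdev, unsigned int bus,
					 unsigned int devfn, int reg, uint32_t *val)
{
	struct xen_pci_op op = {
		.cmd    = XEN_PCI_OP_conf_read,
		.domain = 0,
		.bus    = bus,
		.devfn  = devfn,
		.offset = reg,
		.size   = 4,
	};
	int err = do_pci_op(pdev, &op);

	if (err == 0)
		*val = op.value;	/* register contents on success */
	return err;
}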
Example 8
static void
pci_vtcon_control_tx(struct pci_vtcon_port *port, void *arg, struct iovec *iov,
    int niov)
{
	struct pci_vtcon_softc *sc;
	struct pci_vtcon_port *tmp;
	struct pci_vtcon_control resp, *ctrl;
	int i;

	assert(niov == 1);

	sc = port->vsp_sc;
	ctrl = (struct pci_vtcon_control *)iov->iov_base;

	switch (ctrl->event) {
	case VTCON_DEVICE_READY:
		/* set port ready events for registered ports */
		for (i = 0; i < VTCON_MAXPORTS; i++) {
			tmp = &sc->vsc_ports[i];
			if (tmp->vsp_enabled)
				pci_vtcon_announce_port(tmp);
		}
		break;

	case VTCON_PORT_READY:
		if (ctrl->id >= sc->vsc_nports) {
			WPRINTF(("VTCON_PORT_READY event for unknown port %d\n",
			    ctrl->id));
			return;
		}

		tmp = &sc->vsc_ports[ctrl->id];
		if (tmp->vsp_console) {
			resp.event = VTCON_CONSOLE_PORT;
			resp.id = ctrl->id;
			resp.value = 1;
			pci_vtcon_control_send(sc, &resp, NULL, 0);
		}
		break;
	}
}
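
The messages parsed above follow the virtio console control-message layout; roughly (a sketch after the spec's virtio_console_control, which the bhyve structure mirrors):

struct pci_vtcon_control {
	uint32_t id;	/* port number the event refers to */
	uint16_t event;	/* VTCON_DEVICE_READY, VTCON_PORT_READY, ... */
	uint16_t value;	/* event-specific argument */
};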
Example 9
static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	char *devname;
	char *vtopts;
	int mac_provided;

	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified
	 */
	mac_provided = 0;
	sc->vsc_tapfd = -1;
	if (opts != NULL) {
		char tbuf[80];
		int err;

		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		if (vtopts != NULL) {
			err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac);
			if (err != 0) {
				free(devname);
				return (err);
			}
			mac_provided = 1;
		}

		strcpy(tbuf, "/dev/");
		strlcat(tbuf, devname, sizeof(tbuf));

		free(devname);

		sc->vsc_tapfd = open(tbuf, O_RDWR);
		if (sc->vsc_tapfd == -1) {
			WPRINTF(("open of tap device %s failed\n", tbuf));
		} else {
			/*
			 * Set non-blocking and register for read
			 * notifications with the event loop
			 */
			int opt = 1;
			if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
				WPRINTF(("tap device O_NONBLOCK failed\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}

			/* Only register with the event loop if the tap
			 * is still usable */
			if (sc->vsc_tapfd != -1) {
				sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
							  EVF_READ,
							  pci_vtnet_tap_callback,
							  sc);
				if (sc->vsc_mevp == NULL) {
					WPRINTF(("Could not register event\n"));
					close(sc->vsc_tapfd);
					sc->vsc_tapfd = -1;
				}
			}
		}		
	}

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and the VM name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);

		MD5Init(&mdctx);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);

		sc->vsc_config.mac[0] = 0x00;
		sc->vsc_config.mac[1] = 0xa0;
		sc->vsc_config.mac[2] = 0x98;
		sc->vsc_config.mac[3] = digest[0];
		sc->vsc_config.mac[4] = digest[1];
		sc->vsc_config.mac[5] = digest[2];
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);

	pci_lintr_request(pi);

	/* link always up */
	sc->vsc_config.status = 1;
	
	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL); 

	/* 
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned. 
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}
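
The thread created at the end of pci_vtnet_init() follows a simple blocking producer/consumer shape. A condensed sketch (an assumption-based simplification of the real pci_vtnet_tx_thread, which also coordinates with device reset):

static void *
example_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq = &sc->vsc_queues[VTNET_TXQ];

	pthread_mutex_lock(&sc->tx_mtx);
	for (;;) {
		/* Sleep until a TX queue notify signals tx_cond */
		while (!vq_has_descs(vq))
			pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);

		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		/* ... pull descriptor chains and write the frames out ... */

		pthread_mutex_lock(&sc->tx_mtx);
		sc->tx_in_progress = 0;
	}
	return (NULL);
}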
Example 10
    bool Scheduler::execute()
    {
        static units::Nanoseconds time(0);

        DPRINTF("Begin executing the scheduler\n");
        DPRINTF("Scheduler mutex addr is %p\n", &(m_sched_impl->sync));

        /* If we let the period be zero, then the scheduler will always be
         * running.
         * TODO move this into an initialization routine called before
         * start().
         */
        if(0 == m_props.period)
        {
            EPRINTF("Invalid period\n");
            return false;
        }

        /* The first time through we have to get the time from the RTC.
         * Thereafter, we just add the period.
         */
        if(false == get_time(time))
        {
            EPRINTF("Failed to get the time\n");
            return false;
        }

        /* The scheduler tracks reference time. Reference time starts at zero
         * and gets incremented by the scheduler's period each time the
         * scheduler runs.
         */
        Reference_time &rtimer = Reference_time::get_instance();
        units::Nanoseconds ref_time(0);

        /* We have to have the mutex before calling wait.
        */
        if(false == this->lock())
        {
            EPRINTF("Failed to get mutex\n");
            return false;
        }

        /* Get a reference to the data router. We'll be calling this to move
         * data for the pub/sub system.
         */
        Data_router &router = Data_router::get_instance();

        DPRINTF("Entering the scheduler loop\n");

        /* Here's the meat of the show! This stuff is done in an infinite loop
         * while the task is running.
         */
        Sched_list &list = m_sched_impl->rategroup;
        while(this->is_operational() || sched_unwind_tics)
        {
            /* If the scheduler has been asked to halt, start counting down the
             * unwind tics. The unwind tics make sure that all tasks have had
             * time to complete before the scheduler exits and ceases scheduling
             * the tasks.
             */
            if(!(this->is_operational()))
            {
                --sched_unwind_tics;
                DPRINTF("Unwinding, %u tics left\n", sched_unwind_tics);
            }

            /* The scheduler goes to sleep until its runtime period has elapsed.
             * Time of zero means wait forever on the sync point. This allows
             * the scheduler to be driven by an external time source.
             */
            DPRINTF("Wait for next tic\n");
            if(m_use_external_clock)
            {
                time = units::Nanoseconds(0);
            }
            else
            {
                time = units::Nanoseconds(m_props.period + time);
            }

            m_sched_impl->sync.condition_cleared();
            if(false == m_sched_impl->sync.wait(time))
            {
                EPRINTF("Scheduler wait failed\n");
                this->unlock();
                return false;
            }

            units::Nanoseconds start_time;
            get_time(start_time);

            /* Increment the reference timer by the period of the scheduler.
             * TODO This isn't really how I want to handle reference time.
             * Right now there is no lock around it so it could misbehave on
             * multicore. I think it should be sent via pub/sub message, but
             * I'm also using it as the predicate around the tasks wait
             * condition which makes some possibly undesirable dependencies.
             */
            ref_time = rtimer.increment(m_props.period);

            DPRINTF("ref_time = %" PRId64 "\n", int64_t(ref_time));

            /* Transfer all registered data that is scheduled to be moved this
             * period.
             */
            router.do_transfers(ref_time);
            DPRINTF("Data routing complete\n");

            /* For each rategroup, look to see if it is time to run.
             * TODO The rategroups should be in a sorted list so we can bail
             * early once we reach a period that is not modulo the reference
             * time.
             */
            unsigned int item_num = 0;
            for(Sched_list::Node *n = list.head(); n; n = n->next())
            {
                ++item_num;
                DPRINTF("Checking list item number %u\n", item_num);
                if(0 == (ref_time % n->data->period))
                {
                    if(n->data->finished)
                    {
                        n->data->rt_attr.last_runtime =
                            n->data->end_time - n->data->start_time;

                        if(n->data->rt_attr.last_runtime >
                           n->data->rt_attr.max_runtime)
                        {
                            n->data->rt_attr.max_runtime =
                                n->data->rt_attr.last_runtime;
                        }
                    }
                    else
                    {
                        WPRINTF("OVERRUN Rategroup %" PRId64 "\n",
                                int64_t(n->data->period));
                        ++(n->data->rt_attr.num_overruns);
#ifdef DEBUG_OVERRUN_BUG
                        fprintf(stderr, "num overruns = %u\n",
                                n->data->rt_attr.num_overruns);
                        fprintf(stderr, "last runtime = %ld\n",
                                int64_t(n->data->rt_attr.last_runtime));
                        fprintf(stderr, "max runtime = %ld\n",
                                int64_t(n->data->rt_attr.max_runtime));
#endif
                        n->data->rt_attr_symbol->entry->write(n->data->rt_attr);
                        /* If an overrun occurs in this rategroup, skip
                         * signaling it and move on to the next one. If
                         * we tried to grab the rategroup lock here we
                         * could block the scheduler for some
                         * non-deterministic period of time.
                         */
                        continue;
                    }
                    n->data->rt_attr_symbol->entry->write(n->data->rt_attr);

                    DPRINTF("Signaling %" PRId64 " period tasks\n",
                            int64_t(n->data->period));
                    if(false == n->data->sync.lock())
                    {
                        EPRINTF("Attaining rategroup lock\n");
                        continue;
                    }
                    n->data->start_time = start_time;
                    n->data->finished = false;
                    n->data->sync.condition_satisfied();
                    if(false == n->data->sync.release())
                    {
                        EPRINTF("Releasing rategroup\n");
                    }
                    if(false == n->data->sync.unlock())
                    {
                        EPRINTF("Releasing rategroup lock\n");
                    }
                }
            }

            DPRINTF("Scheduler is running!\n");
        }

        /* Release the scheduler lock and exit the scheduler task.
        */
        this->unlock();
        return false;
    }
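
The sync point's wait(time) above amounts to a condition-variable wait with a deadline, where a zero time means "block until externally signaled". A minimal plain-C sketch of that primitive (illustrative only, not this codebase's sync implementation):

#include <pthread.h>
#include <time.h>

/* Returns 0 on wakeup, -1 on timeout or error. The caller must hold mtx,
 * just as the scheduler holds its lock before waiting. */
static int
timed_wait(pthread_mutex_t *mtx, pthread_cond_t *cond, long long timeout_ns)
{
	struct timespec ts;

	if (timeout_ns == 0)	/* externally clocked: wait forever */
		return (pthread_cond_wait(cond, mtx) == 0) ? 0 : -1;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ns / 1000000000LL;
	ts.tv_nsec += timeout_ns % 1000000000LL;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	return (pthread_cond_timedwait(cond, mtx, &ts) == 0) ? 0 : -1;
}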