Example #1
/*
 * Ctor.
 */
static int
vether_clone_create(struct if_clone *ifc, int unit, caddr_t data)
{
	struct vether_softc *sc;
	struct ifnet *ifp;	
	uint32_t randval;
	uint8_t	lla[ETHER_ADDR_LEN];
/*
 * Allocate software context.
 */ 
	sc = malloc(sizeof(struct vether_softc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}
	if_initname(ifp, vether_name, unit);
/*
 * Bind software context.
 */ 	
	VETHER_LOCK_INIT(sc);
	ifp->if_softc = sc;
/*
 * Initialize specific attributes.
 */ 
	ifp->if_init = vether_init;
	ifp->if_ioctl = vether_ioctl;
	ifp->if_start = vether_start;
 
 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
 	ifp->if_flags = (IFF_SIMPLEX|IFF_BROADCAST|IFF_MULTICAST|IFF_VETHER);
 
	ifp->if_capabilities = IFCAP_VLAN_MTU|IFCAP_JUMBO_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU|IFCAP_JUMBO_MTU;
	
	ifmedia_init(&sc->sc_ifm, 0, vether_media_change, vether_media_status);
	ifmedia_add(&sc->sc_ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifm, IFM_ETHER|IFM_AUTO);
 	
 	sc->sc_status = IFM_AVALID;
/*
 * Generate randomized lla.  
 */
	lla[0] = 0x0;
	randval = arc4random();
	memcpy(&lla[1], &randval, sizeof(uint32_t));
	lla[5] = (uint8_t)unit; /* interface unit number */
/*
 * Initialize Ethernet-specific attributes and attach the interface
 * to the link layer: ether_ifattach(9) hooks it into the netgraph(4)
 * domain and creates the bpf(4) inspection access point for this
 * generic ifnet(9) interface.
 */
 	ether_ifattach(ifp, lla);
 	ifp->if_baudrate = 0;
 
	mtx_lock(&vether_list_mtx);
	LIST_INSERT_HEAD(&vether_list, sc, vether_list);
	mtx_unlock(&vether_list_mtx);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
 
	return (0);
}
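A constructor like this is normally paired with a clone destructor that unwinds each step in reverse order. The sketch below is a hypothetical reconstruction of that counterpart, not the driver's verbatim code; it assumes the vether_list globals shown above and a VETHER_LOCK_DESTROY macro mirroring VETHER_LOCK_INIT.

static void
vether_clone_destroy(struct ifnet *ifp)
{
	struct vether_softc *sc = ifp->if_softc;

	mtx_lock(&vether_list_mtx);
	LIST_REMOVE(sc, vether_list);
	mtx_unlock(&vether_list_mtx);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ether_ifdetach(ifp);			/* undo ether_ifattach() */
	ifmedia_removeall(&sc->sc_ifm);		/* undo ifmedia_init()/add() */
	if_free(ifp);				/* undo if_alloc() */
	VETHER_LOCK_DESTROY(sc);		/* assumed counterpart macro */
	free(sc, M_DEVBUF);
}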
Example #2
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/* 
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
	        p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed. We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make sense
		 * for a runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}
Example #3
void
dev_lock(void)
{

	mtx_lock(&devmtx);
}
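The matching release helper is the one-line unlock of the same mutex; a minimal sketch of the expected counterpart, in the same style:

void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}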
/*------------------------------------------------------------------------*
 *	usbd_do_request_flags and usbd_do_request
 *
 * Description of arguments passed to these functions:
 *
 * "udev" - this is the "usb_device" structure pointer on which the
 * request should be performed. It is possible to call this function
 * in both Host Side mode and Device Side mode.
 *
 * "mtx" - if this argument is non-NULL the mutex pointed to by it
 * will get dropped and picked up during the execution of this
 * function, hence this function sometimes needs to sleep. If this
 * argument is NULL it has no effect.
 *
 * "req" - this argument must always be non-NULL and points to an
 * 8-byte structure holding the USB request to be done. The USB
 * request structure has a bit telling the direction of the USB
 * request, if it is a read or a write.
 *
 * "data" - if the "wLength" part of the structure pointed to by "req"
 * is non-zero this argument must point to a valid kernel buffer which
 * can hold at least "wLength" bytes. If "wLength" is zero "data" can
 * be NULL.
 *
 * "flags" - here is a list of valid flags:
 *
 *  o USB_SHORT_XFER_OK: allows the data transfer to be shorter than
 *  specified
 *
 *  o USB_DELAY_STATUS_STAGE: allows the status stage to be performed
 *  at a later point in time. This is tunable by the "hw.usb.ss_delay"
 *  sysctl. This flag is mostly useful for debugging.
 *
 *  o USB_USER_DATA_PTR: treat the "data" pointer like a userland
 *  pointer.
 *
 * "actlen" - if non-NULL the actual transfer length will be stored in
 * the 16-bit unsigned integer pointed to by "actlen". This
 * information is mostly useful when the "USB_SHORT_XFER_OK" flag is
 * used.
 *
 * "timeout" - gives the timeout for the control transfer in
 * milliseconds. A "timeout" value less than 50 milliseconds is
 * treated like a 50 millisecond timeout. A "timeout" value greater
 * than 30 seconds is treated like a 30 second timeout. This USB stack
 * does not allow control requests without a timeout.
 *
 * NOTE: This function is thread safe. All calls to
 * "usbd_do_request_flags" will be serialised by the use of an
 * internal "sx_lock".
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
usb_error_t
usbd_do_request_flags(struct usb_device *udev, struct mtx *mtx,
    struct usb_device_request *req, void *data, uint16_t flags,
    uint16_t *actlen, usb_timeout_t timeout)
{
	usb_handle_req_t *hr_func;
	struct usb_xfer *xfer;
	const void *desc;
	int err = 0;
	usb_ticks_t start_ticks;
	usb_ticks_t delta_ticks;
	usb_ticks_t max_ticks;
	uint16_t length;
	uint16_t temp;

	if (timeout < 50) {
		/* timeout is too small */
		timeout = 50;
	}
	if (timeout > 30000) {
		/* timeout is too big */
		timeout = 30000;
	}
	length = UGETW(req->wLength);

	DPRINTFN(5, "udev=%p bmRequestType=0x%02x bRequest=0x%02x "
	    "wValue=0x%02x%02x wIndex=0x%02x%02x wLength=0x%02x%02x\n",
	    udev, req->bmRequestType, req->bRequest,
	    req->wValue[1], req->wValue[0],
	    req->wIndex[1], req->wIndex[0],
	    req->wLength[1], req->wLength[0]);

	/* Check if the device is still alive */
	if (udev->state < USB_STATE_POWERED) {
		DPRINTF("usb device has gone\n");
		return (USB_ERR_NOT_CONFIGURED);
	}

	/*
	 * Set "actlen" to a known value in case the caller does not
	 * check the return value:
	 */
	if (actlen)
		*actlen = 0;

#if (USB_HAVE_USER_IO == 0)
	if (flags & USB_USER_DATA_PTR)
		return (USB_ERR_INVAL);
#endif
	if (mtx) {
		mtx_unlock(mtx);
		if (mtx != &Giant) {
			mtx_assert(mtx, MA_NOTOWNED);
		}
	}
	/*
	 * Grab the default sx-lock so that serialisation
	 * is achieved when multiple threads are involved:
	 */

	sx_xlock(udev->default_sx);

	hr_func = usbd_get_hr_func(udev);

	if (hr_func != NULL) {
		DPRINTF("Handle Request function is set\n");

		desc = NULL;
		temp = 0;

		if (!(req->bmRequestType & UT_READ)) {
			if (length != 0) {
				DPRINTFN(1, "The handle request function "
				    "does not support writing data!\n");
				err = USB_ERR_INVAL;
				goto done;
			}
		}

		/* The root HUB code needs the BUS lock locked */

		USB_BUS_LOCK(udev->bus);
		err = (hr_func) (udev, req, &desc, &temp);
		USB_BUS_UNLOCK(udev->bus);

		if (err)
			goto done;

		if (length > temp) {
			if (!(flags & USB_SHORT_XFER_OK)) {
				err = USB_ERR_SHORT_XFER;
				goto done;
			}
			length = temp;
		}
		if (actlen)
			*actlen = length;

		if (length > 0) {
#if USB_HAVE_USER_IO
			if (flags & USB_USER_DATA_PTR) {
				if (copyout(desc, data, length)) {
					err = USB_ERR_INVAL;
					goto done;
				}
			} else
#endif
				bcopy(desc, data, length);
		}
		goto done;		/* success */
	}

	/*
	 * Setup a new USB transfer or use the existing one, if any:
	 */
	usbd_default_transfer_setup(udev);

	xfer = udev->default_xfer[0];
	if (xfer == NULL) {
		/* most likely out of memory */
		err = USB_ERR_NOMEM;
		goto done;
	}
	USB_XFER_LOCK(xfer);

	if (flags & USB_DELAY_STATUS_STAGE)
		xfer->flags.manual_status = 1;
	else
		xfer->flags.manual_status = 0;

	if (flags & USB_SHORT_XFER_OK)
		xfer->flags.short_xfer_ok = 1;
	else
		xfer->flags.short_xfer_ok = 0;

	xfer->timeout = timeout;

	start_ticks = ticks;

	max_ticks = USB_MS_TO_TICKS(timeout);

	usbd_copy_in(xfer->frbuffers, 0, req, sizeof(*req));

	usbd_xfer_set_frame_len(xfer, 0, sizeof(*req));
	xfer->nframes = 2;

	while (1) {
		temp = length;
		if (temp > xfer->max_data_length) {
			temp = usbd_xfer_max_len(xfer);
		}
		usbd_xfer_set_frame_len(xfer, 1, temp);

		if (temp > 0) {
			if (!(req->bmRequestType & UT_READ)) {
#if USB_HAVE_USER_IO
				if (flags & USB_USER_DATA_PTR) {
					USB_XFER_UNLOCK(xfer);
					err = usbd_copy_in_user(xfer->frbuffers + 1,
					    0, data, temp);
					USB_XFER_LOCK(xfer);
					if (err) {
						err = USB_ERR_INVAL;
						break;
					}
				} else
#endif
					usbd_copy_in(xfer->frbuffers + 1,
					    0, data, temp);
			}
			xfer->nframes = 2;
		} else {
			if (xfer->frlengths[0] == 0) {
				if (xfer->flags.manual_status) {
#if USB_DEBUG
					int temp;

					temp = usb_ss_delay;
					if (temp > 5000) {
						temp = 5000;
					}
					if (temp > 0) {
						usb_pause_mtx(
						    xfer->xroot->xfer_mtx,
						    USB_MS_TO_TICKS(temp));
					}
#endif
					xfer->flags.manual_status = 0;
				} else {
					break;
				}
			}
			xfer->nframes = 1;
		}

		usbd_transfer_start(xfer);

		while (usbd_transfer_pending(xfer)) {
			cv_wait(udev->default_cv,
			    xfer->xroot->xfer_mtx);
		}

		err = xfer->error;

		if (err) {
			break;
		}
		/* subtract length of SETUP packet, if any */

		if (xfer->aframes > 0) {
			xfer->actlen -= xfer->frlengths[0];
		} else {
			xfer->actlen = 0;
		}

		/* check for short packet */

		if (temp > xfer->actlen) {
			temp = xfer->actlen;
			length = temp;
		}
		if (temp > 0) {
			if (req->bmRequestType & UT_READ) {
#if USB_HAVE_USER_IO
				if (flags & USB_USER_DATA_PTR) {
					USB_XFER_UNLOCK(xfer);
					err = usbd_copy_out_user(xfer->frbuffers + 1,
					    0, data, temp);
					USB_XFER_LOCK(xfer);
					if (err) {
						err = USB_ERR_INVAL;
						break;
					}
				} else
#endif
					usbd_copy_out(xfer->frbuffers + 1,
					    0, data, temp);
			}
		}
		/*
		 * Clear "frlengths[0]" so that we don't send the setup
		 * packet again:
		 */
		usbd_xfer_set_frame_len(xfer, 0, 0);

		/* update length and data pointer */
		length -= temp;
		data = USB_ADD_BYTES(data, temp);

		if (actlen) {
			(*actlen) += temp;
		}
		/* check for timeout */

		delta_ticks = ticks - start_ticks;
		if (delta_ticks > max_ticks) {
			if (!err) {
				err = USB_ERR_TIMEOUT;
			}
		}
		if (err) {
			break;
		}
	}

	if (err) {
		/*
		 * Make sure that the control endpoint is no longer
		 * blocked in case of a non-transfer related error:
		 */
		usbd_transfer_stop(xfer);
	}
	USB_XFER_UNLOCK(xfer);

done:
	sx_xunlock(udev->default_sx);

	if (mtx) {
		mtx_lock(mtx);
	}
	return ((usb_error_t)err);
}
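As a usage illustration, a synchronous GET_DESCRIPTOR read could be issued as below. This is a sketch, not upstream code: the helper name is hypothetical, the caller is assumed to run in a sleepable context, and USB_DEFAULT_TIMEOUT is assumed to be an acceptable timeout value.

static usb_error_t
example_get_device_desc(struct usb_device *udev,
    struct usb_device_descriptor *dd)
{
	struct usb_device_request req;
	uint16_t actlen;

	req.bmRequestType = UT_READ_DEVICE;
	req.bRequest = UR_GET_DESCRIPTOR;
	USETW2(req.wValue, UDESC_DEVICE, 0);	/* type in high byte, index 0 */
	USETW(req.wIndex, 0);
	USETW(req.wLength, sizeof(*dd));

	/* NULL mutex: the caller holds no lock that must be dropped */
	return (usbd_do_request_flags(udev, NULL, &req, dd,
	    USB_SHORT_XFER_OK, &actlen, USB_DEFAULT_TIMEOUT));
}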
Example #5
/*
 * The caller must make sure that the new protocol is fully set up and ready to
 * accept requests before it is registered.
 */
int
pf_proto_register(int family, struct protosw *npr)
{
    VNET_ITERATOR_DECL(vnet_iter);
    struct domain *dp;
    struct protosw *pr, *fpr;

    /* Sanity checks. */
    if (family == 0)
        return (EPFNOSUPPORT);
    if (npr->pr_type == 0)
        return (EPROTOTYPE);
    if (npr->pr_protocol == 0)
        return (EPROTONOSUPPORT);
    if (npr->pr_usrreqs == NULL)
        return (ENXIO);

    /* Try to find the specified domain based on the family. */
    for (dp = domains; dp; dp = dp->dom_next)
        if (dp->dom_family == family)
            goto found;
    return (EPFNOSUPPORT);

found:
    /* Initialize backpointer to struct domain. */
    npr->pr_domain = dp;
    fpr = NULL;

    /*
     * Protect us against races when two protocol registrations for
     * the same protocol happen at the same time.
     */
    mtx_lock(&dom_mtx);

    /* The new protocol must not yet exist. */
    for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
        if ((pr->pr_type == npr->pr_type) &&
                (pr->pr_protocol == npr->pr_protocol)) {
            mtx_unlock(&dom_mtx);
            return (EEXIST);	/* XXX: Check only protocol? */
        }
        /* While here, remember the first free spacer. */
        if ((fpr == NULL) && (pr->pr_protocol == PROTO_SPACER))
            fpr = pr;
    }

    /* If no free spacer is found we can't add the new protocol. */
    if (fpr == NULL) {
        mtx_unlock(&dom_mtx);
        return (ENOMEM);
    }

    /* Copy the new struct protosw over the spacer. */
    bcopy(npr, fpr, sizeof(*fpr));

    /* Job is done, no more protection required. */
    mtx_unlock(&dom_mtx);

    /* Initialize and activate the protocol. */
    VNET_LIST_RLOCK();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET_QUIET(vnet_iter);
        protosw_init(fpr);
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK();

    return (0);
}
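A caller-side sketch of the contract described above, using an entirely hypothetical protocol: the protosw is fully populated before registration, and EEXIST signals that the (type, protocol) pair already exists. Declarations of foo_input and foo_usrreqs are elided.

static struct protosw foo_protosw = {
    .pr_type =      SOCK_DGRAM,
    .pr_protocol =  123,                /* hypothetical protocol number */
    .pr_flags =     PR_ATOMIC | PR_ADDR,
    .pr_input =     foo_input,          /* hypothetical input handler */
    .pr_usrreqs =   &foo_usrreqs,       /* hypothetical usrreqs table */
};

static int
foo_load(void)
{

    return (pf_proto_register(PF_INET, &foo_protosw));
}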
static status_t
callout_thread(void* /*data*/)
{
	status_t status = B_OK;

	do {
		bigtime_t timeout = B_INFINITE_TIMEOUT;

		if (status == B_TIMED_OUT || status == B_OK) {
			// scan timers for new timeout and/or execute a timer
			mutex_lock(&sLock);

			struct callout* c = NULL;
			while (true) {
				c = (callout*)list_get_next_item(&sTimers, c);
				if (c == NULL)
					break;

				if (c->due < system_time()) {
					struct mtx *mutex = c->c_mtx;

					// execute timer
					list_remove_item(&sTimers, c);
					c->due = -1;
					sCurrentCallout = c;

					mutex_unlock(&sLock);

					if (mutex != NULL)
						mtx_lock(mutex);
				
					c->c_func(c->c_arg);
				
					if (mutex != NULL)
						mtx_unlock(mutex);

					mutex_lock(&sLock);

					sCurrentCallout = NULL;
					c = NULL;
						// restart scanning as we unlocked the list
				} else {
					// calculate new timeout
					if (c->due < timeout)
						timeout = c->due;
				}
			}

			sTimeout = timeout;
			mutex_unlock(&sLock);
		}

		status = acquire_sem_etc(sWaitSem, 1, B_ABSOLUTE_TIMEOUT, timeout);
			// the wait sem normally can't be acquired, so we
			// have to look at the status value the call returns:
			//
			// B_OK - a new timer has been added or canceled
			// B_TIMED_OUT - look for timers to be executed
			// B_BAD_SEM_ID - we are asked to quit
	} while (status != B_BAD_SEM_ID);

	return B_OK;
}
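The loop above only rescans when sWaitSem is released or the absolute timeout fires, so whoever adds a timer must wake it. A producer-side sketch with a hypothetical helper name, assuming the same globals (sLock, sTimers, sTimeout, sWaitSem):

static void
example_schedule(struct callout* c, bigtime_t due)
{
	mutex_lock(&sLock);
	c->due = due;
	list_add_item(&sTimers, c);
	if (due < sTimeout) {
		// the thread returns B_OK from acquire_sem_etc() and rescans
		release_sem(sWaitSem);
	}
	mutex_unlock(&sLock);
}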
Example #7
static void
linux_rt_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct sigacts *psp;
	struct trapframe *regs;
	struct l_rt_sigframe *fp, frame;
	int oonstack;
	int sig;
	int code;
	
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_rsp);

#ifdef DEBUG
	if (ldebug(rt_sendsig))
		printf(ARGS(rt_sendsig, "%p, %d, %p, %u"),
		    catcher, sig, (void*)mask, code);
#endif
	/*
	 * Allocate space for the signal handler context.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct l_rt_sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct l_rt_sigframe));
	} else
		fp = (struct l_rt_sigframe *)regs->tf_rsp - 1;
	mtx_unlock(&psp->ps_mtx);

	/*
	 * Build the argument list for the signal handler.
	 */
	if (p->p_sysent->sv_sigtbl)
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	bzero(&frame, sizeof(frame));

	frame.sf_handler = PTROUT(catcher);
	frame.sf_sig = sig;
	frame.sf_siginfo = PTROUT(&fp->sf_si);
	frame.sf_ucontext = PTROUT(&fp->sf_sc);

	/* Fill in POSIX parts */
	ksiginfo_to_lsiginfo(ksi, &frame.sf_si, sig);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	frame.sf_sc.uc_flags = 0;		/* XXX ??? */
	frame.sf_sc.uc_link = 0;		/* XXX ??? */

	frame.sf_sc.uc_stack.ss_sp = PTROUT(td->td_sigstk.ss_sp);
	frame.sf_sc.uc_stack.ss_size = td->td_sigstk.ss_size;
	frame.sf_sc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? LINUX_SS_ONSTACK : 0) : LINUX_SS_DISABLE;
	PROC_UNLOCK(p);

	bsd_to_linux_sigset(mask, &frame.sf_sc.uc_sigmask);

	frame.sf_sc.uc_mcontext.sc_mask   = frame.sf_sc.uc_sigmask.__bits[0];
	frame.sf_sc.uc_mcontext.sc_edi    = regs->tf_rdi;
	frame.sf_sc.uc_mcontext.sc_esi    = regs->tf_rsi;
	frame.sf_sc.uc_mcontext.sc_ebp    = regs->tf_rbp;
	frame.sf_sc.uc_mcontext.sc_ebx    = regs->tf_rbx;
	frame.sf_sc.uc_mcontext.sc_edx    = regs->tf_rdx;
	frame.sf_sc.uc_mcontext.sc_ecx    = regs->tf_rcx;
	frame.sf_sc.uc_mcontext.sc_eax    = regs->tf_rax;
	frame.sf_sc.uc_mcontext.sc_eip    = regs->tf_rip;
	frame.sf_sc.uc_mcontext.sc_cs     = regs->tf_cs;
	frame.sf_sc.uc_mcontext.sc_gs     = regs->tf_gs;
	frame.sf_sc.uc_mcontext.sc_fs     = regs->tf_fs;
	frame.sf_sc.uc_mcontext.sc_es     = regs->tf_es;
	frame.sf_sc.uc_mcontext.sc_ds     = regs->tf_ds;
	frame.sf_sc.uc_mcontext.sc_eflags = regs->tf_rflags;
	frame.sf_sc.uc_mcontext.sc_esp_at_signal = regs->tf_rsp;
	frame.sf_sc.uc_mcontext.sc_ss     = regs->tf_ss;
	frame.sf_sc.uc_mcontext.sc_err    = regs->tf_err;
	frame.sf_sc.uc_mcontext.sc_cr2    = (u_int32_t)(uintptr_t)ksi->ksi_addr;
	frame.sf_sc.uc_mcontext.sc_trapno = bsd_to_linux_trapcode(code);

#ifdef DEBUG
	if (ldebug(rt_sendsig))
		printf(LMSG("rt_sendsig flags: 0x%x, sp: %p, ss: 0x%lx, mask: 0x%x"),
		    frame.sf_sc.uc_stack.ss_flags, td->td_sigstk.ss_sp,
		    td->td_sigstk.ss_size, frame.sf_sc.uc_mcontext.sc_mask);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if (ldebug(rt_sendsig))
			printf(LMSG("rt_sendsig: bad stack %p, oonstack=%x"),
			    fp, oonstack);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/*
	 * Build context to run handler in.
	 */
	regs->tf_rsp = PTROUT(fp);
	regs->tf_rip = p->p_sysent->sv_sigcode_base + linux_sznonrtsigcode;
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucode32sel;
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
static int
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
		error = EINVAL;
		goto out_unlocked;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;
	}

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);
	}

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;
	error = 0;

out:
	mtx_unlock(&ioat->cleanup_lock);
out_unlocked:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
	return (error);
}
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h masks out errors
	 * that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);
	error = 0;

out:
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	mtx_unlock(IOAT_REFLK);

	if (error == 0)
		error = ioat_start_channel(ioat);

	return (error);
}
Example #10
/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;
	uint32_t order;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;

	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
		error = EINVAL;
		goto out;
	}
	if (ioat->quiescing) {
		error = ENXIO;
		goto out;
	}

	for (;;) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		order = ioat->ring_size_order;
		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
			if ((mflags & M_WAITOK) != 0) {
				msleep(&ioat->tail, &ioat->submit_lock, 0,
				    "ioat_rsz", 0);
				continue;
			}

			error = EAGAIN;
			break;
		}

		ioat->is_resize_pending = TRUE;
		for (;;) {
			mtx_unlock(&ioat->submit_lock);

			new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
			    TRUE, mflags);

			mtx_lock(&ioat->submit_lock);
			KASSERT(ioat->ring_size_order == order,
			    ("is_resize_pending should protect order"));

			if (new_ring == NULL) {
				KASSERT((mflags & M_WAITOK) == 0,
				    ("allocation failed"));
				error = EAGAIN;
				break;
			}

			error = ring_grow(ioat, order, new_ring);
			if (error == 0)
				break;
		}
		ioat->is_resize_pending = FALSE;
		wakeup(&ioat->tail);
		if (error)
			break;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	return (error);
}
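A hypothetical caller following the locking contract stated above: the submit_lock is held across both the reservation and the subsequent ring manipulation.

static int
example_submit_one(struct ioat_softc *ioat)
{
	int error;

	mtx_lock(&ioat->submit_lock);
	error = ioat_reserve_space(ioat, 1, M_NOWAIT);
	if (error == 0) {
		/* ... fill the descriptor at ioat->head and ring the doorbell ... */
	}
	mtx_unlock(&ioat->submit_lock);
	return (error);
}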
Example #11
static int
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
		error = EINVAL;
		goto out;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
	if (head < tail) {
		for (i = 0; i <= head; i++) {
			tmp = newring[oldsize + i];

			newring[oldsize + i] = newring[i];
			newring[oldsize + i]->id = oldsize + i;

			newring[i] = tmp;
			newring[i]->id = i;
		}
		head += oldsize;
	}

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {
		i = oldsize - 1;
		end = newsize;
	} else {
		/* Head did wrap; link newhead..newsize and 0..oldhead */
		i = head;
		end = newsize + (head - oldsize) + 1;
	}

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;
	ioat->tail = tail;
	ioat->head = head;
	error = 0;

	mtx_unlock(&ioat->cleanup_lock);
out:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
	return (error);
}
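Both ring_grow() and ring_shrink() rely on the ring size being a power of two, so that "index & (size - 1)" is equivalent to "index % size" while head and tail increment without bound. A standalone illustration of the invariant:

#include <stdio.h>

int
main(void)
{
	unsigned size = 8;		/* 1 << order */
	unsigned head = 13, tail = 10;	/* free-running counters */

	/* The mask maps the counters onto ring slots. */
	printf("head slot %u, tail slot %u\n",
	    head & (size - 1), tail & (size - 1));	/* prints 5 and 2 */

	/* The difference stays valid even across wrap-around. */
	printf("in flight: %u\n", head - tail);		/* prints 3 */
	return (0);
}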
Example #12
/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
	         * The alignment must be greater than or equal to the
	         * "size" else the object can be split between two
	         * memory pages and we get a problem!
	         */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of
		 * the code is greater than or equal to the size and
		 * less than two times the size, so that if we double
		 * the size, the size will be greater than the
		 * alignment.
		 *
		 * The bus-dma system has a check for "alignment"
		 * being less than "size". If that check fails we end
		 * up using contigmalloc which is page based even for
		 * small allocations. Try to avoid that to save
		 * memory, hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}
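The release path is the mirror image of the allocation above. A sketch of the expected counterpart, unloading and freeing only when a buffer is actually present:

void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc != NULL && pc->buffer != NULL) {
		bus_dmamap_unload(pc->tag, pc->map);
		bus_dmamem_free(pc->tag, pc->buffer, pc->map);
		pc->buffer = NULL;
	}
}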
Example #13
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{

	mtx_lock(td->td_lock);
}
Example #14
static void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}
Example #15
/**
 * Query the implementation's VdpVideoSurface GetBits/PutBits capabilities.
 */
VdpStatus
vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaType surface_chroma_type,
                                                  VdpYCbCrFormat bits_ycbcr_format,
                                                  VdpBool *is_supported)
{
   vlVdpDevice *dev;
   struct pipe_screen *pscreen;

   if (!is_supported)
      return VDP_STATUS_INVALID_POINTER;

   dev = vlGetDataHTAB(device);
   if (!dev)
      return VDP_STATUS_INVALID_HANDLE;

   pscreen = dev->vscreen->pscreen;
   if (!pscreen)
      return VDP_STATUS_RESOURCES;

   mtx_lock(&dev->mutex);

   switch(bits_ycbcr_format) {
   case VDP_YCBCR_FORMAT_NV12:
      *is_supported = surface_chroma_type == VDP_CHROMA_TYPE_420;
      break;

   case VDP_YCBCR_FORMAT_YV12:
      *is_supported = surface_chroma_type == VDP_CHROMA_TYPE_420;

      /* We can convert YV12 to NV12 on the fly! */
      if (*is_supported &&
          pscreen->is_video_format_supported(pscreen,
                                             PIPE_FORMAT_NV12,
                                             PIPE_VIDEO_PROFILE_UNKNOWN,
                                             PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
         mtx_unlock(&dev->mutex);
         return VDP_STATUS_OK;
      }
      break;

   case VDP_YCBCR_FORMAT_UYVY:
   case VDP_YCBCR_FORMAT_YUYV:
      *is_supported = surface_chroma_type == VDP_CHROMA_TYPE_422;
      break;

   case VDP_YCBCR_FORMAT_Y8U8V8A8:
   case VDP_YCBCR_FORMAT_V8U8Y8A8:
      *is_supported = surface_chroma_type == VDP_CHROMA_TYPE_444;
      break;

   default:
      *is_supported = false;
      break;
   }

   *is_supported &= pscreen->is_video_format_supported(pscreen,
      FormatYCBCRToPipe(bits_ycbcr_format),
      PIPE_VIDEO_PROFILE_UNKNOWN,
      PIPE_VIDEO_ENTRYPOINT_BITSTREAM);
   mtx_unlock(&dev->mutex);

   return VDP_STATUS_OK;
}
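A caller-side sketch (hypothetical helper; "device" is assumed to be a valid VdpDevice handle): probe NV12 get/put-bits support for 4:2:0 surfaces before choosing a transfer format.

static bool
nv12_getputbits_supported(VdpDevice device)
{
   VdpBool supported = VDP_FALSE;
   VdpStatus st = vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(
      device, VDP_CHROMA_TYPE_420, VDP_YCBCR_FORMAT_NV12, &supported);

   return st == VDP_STATUS_OK && supported;
}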
Example #16
/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(1, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);
	if (ioat->ring == NULL)
		return (ENOMEM);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	return (0);
}
Example #17
static __inline void rd_kafka_timers_lock (rd_kafka_timers_t *rkts) {
        mtx_lock(&rkts->rkts_lock);
}
Example #18
static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	int error;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	CTR0(KTR_IOAT, __func__);

	if (status == ioat->last_seen)
		goto out;

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat->stats.descriptors_processed += completed;

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
	wakeup(&ioat->tail);

	if (!is_ioat_halted(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
}
Example #19
int _como_worker_thread (void *data){
    comoWorker *worker = data;
    QUEUE *q;

    if (worker->ctx == NULL){
        char *argv[3];
        int argc = 3;
        const char *arg = "";
        const char *prefix = "--childWorker";

        argv[0] = (char *)arg;
        argv[1] = worker->file;
        argv[2] = (char *)prefix;

        duk_context *ctx = como_create_new_heap (argc, argv, NULL);
        worker->ctx = ctx;
        como_run(ctx);
    }

    while (1){
        
        como_sleep(1);
        while ( !QUEUE_EMPTY(&worker->queueIn) ){
            mtx_lock(&worker->mtx);
            q = QUEUE_HEAD(&(worker)->queueIn);
            QUEUE_REMOVE(q);
            comoQueue *queue = QUEUE_DATA(q, comoQueue, queue);
            mtx_unlock(&worker->mtx);

            if (worker->destroy != 0){
                goto FREE;
            }

            como_push_worker_value(worker->ctx, queue);

            duk_push_pointer(worker->ctx, worker);
            duk_call(worker->ctx, 2);
            
            FREE:
            if (queue->data != NULL && queue->type != DUK_TYPE_POINTER){
                free(queue->data);
            }

            free(queue);
        }
        
        //call this to run event loop only
        duk_call(worker->ctx, 0);
        
        if (worker->destroy == 1){
            worker->destroy = 2; /* pass destruction to main thread */
            
            duk_push_global_object(worker->ctx);
            duk_get_prop_string(worker->ctx, -1, "process");
            duk_get_prop_string(worker->ctx, -1, "_emitExit");
            // dump_stack(worker->ctx, "PP");
            duk_call(worker->ctx, 0);
            break;
        }
    }

    duk_destroy_heap(worker->ctx);
    return 0;
}
Example #20
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
	vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	cdev_t dev;
	vm_object_t object;
	u_short color;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page64(size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);

	/*
	 * Look up pager, creating as necessary.
	 */
	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.
		 */
		object = vm_object_allocate_hold(tp,
						 OFF_TO_IDX(foff + size));
		object->handle = handle;
		object->un_pager.devp.ops = ops;
		object->un_pager.devp.dev = handle;
		TAILQ_INIT(&object->un_pager.devp.devp_pglist);

		/*
		 * handle is only a device for old_dev_pager_ctor.
		 */
		if (ops->cdev_pg_ctor == old_dev_pager_ctor) {
			dev = handle;
			dev->si_object = object;
		}

		TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
		    pager_object_list);

		vm_object_drop(object);
	} else {
		/*
		 * Gain a reference to the object.
		 */
		vm_object_hold(object);
		vm_object_reference_locked(object);
		if (OFF_TO_IDX(foff + size) > object->size)
			object->size = OFF_TO_IDX(foff + size);
		vm_object_drop(object);
	}
	mtx_unlock(&dev_pager_mtx);

	return (object);
}
Example #21
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
static void
linux_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct sigacts *psp;
	struct trapframe *regs;
	struct l_sigframe *fp, frame;
	l_sigset_t lmask;
	int oonstack, i;
	int sig, code;

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		linux_rt_sendsig(catcher, ksi, mask);
		return;
	}

	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_rsp);

#ifdef DEBUG
	if (ldebug(sendsig))
		printf(ARGS(sendsig, "%p, %d, %p, %u"),
		    catcher, sig, (void*)mask, code);
#endif

	/*
	 * Allocate space for the signal handler context.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct l_sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct l_sigframe));
	} else
		fp = (struct l_sigframe *)regs->tf_rsp - 1;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Build the argument list for the signal handler.
	 */
	if (p->p_sysent->sv_sigtbl)
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	bzero(&frame, sizeof(frame));

	frame.sf_handler = PTROUT(catcher);
	frame.sf_sig = sig;

	bsd_to_linux_sigset(mask, &lmask);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	frame.sf_sc.sc_mask   = lmask.__bits[0];
	frame.sf_sc.sc_gs     = regs->tf_gs;
	frame.sf_sc.sc_fs     = regs->tf_fs;
	frame.sf_sc.sc_es     = regs->tf_es;
	frame.sf_sc.sc_ds     = regs->tf_ds;
	frame.sf_sc.sc_edi    = regs->tf_rdi;
	frame.sf_sc.sc_esi    = regs->tf_rsi;
	frame.sf_sc.sc_ebp    = regs->tf_rbp;
	frame.sf_sc.sc_ebx    = regs->tf_rbx;
	frame.sf_sc.sc_edx    = regs->tf_rdx;
	frame.sf_sc.sc_ecx    = regs->tf_rcx;
	frame.sf_sc.sc_eax    = regs->tf_rax;
	frame.sf_sc.sc_eip    = regs->tf_rip;
	frame.sf_sc.sc_cs     = regs->tf_cs;
	frame.sf_sc.sc_eflags = regs->tf_rflags;
	frame.sf_sc.sc_esp_at_signal = regs->tf_rsp;
	frame.sf_sc.sc_ss     = regs->tf_ss;
	frame.sf_sc.sc_err    = regs->tf_err;
	frame.sf_sc.sc_cr2    = (u_int32_t)(uintptr_t)ksi->ksi_addr;
	frame.sf_sc.sc_trapno = bsd_to_linux_trapcode(code);

	for (i = 0; i < (LINUX_NSIG_WORDS-1); i++)
		frame.sf_extramask[i] = lmask.__bits[i+1];

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/*
	 * Build context to run handler in.
	 */
	regs->tf_rsp = PTROUT(fp);
	regs->tf_rip = p->p_sysent->sv_sigcode_base;
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucode32sel;
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
Example #22
/**
 * @brief Acquire the lock on a file
 *
 * @ref squash_file_read, @ref squash_file_write, and @ref
 * squash_file_flush are thread-safe.  This is accomplished by
 * acquiring a lock while each function is operating, in order to
 * ensure exclusive access.
 *
 * If, however, the programmer wishes to call a series of functions
 * and ensure that they are performed without interference, they can
 * manually acquire the lock with this function and use the unlocked
 * variants (@ref squash_file_read_unlocked, @ref
 * squash_file_write_unlocked, and @ref squash_file_flush_unlocked).
 *
 * @note This function has nothing to do with the kind of lock
 * acquired by the [flock](http://linux.die.net/man/2/flock) function.
 *
 * @param file the file to acquire the lock on
 */
void
squash_file_lock (SquashFile* file) {
  assert (file != NULL);

  mtx_lock (&(file->mtx));
}
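A usage sketch of the pattern described above: hold the lock across several unlocked-variant calls so the sequence cannot interleave with other threads. It assumes squash_file_write_unlocked shares squash_file_write's (size, buffer) argument order and that squash_file_unlock is the release counterpart.

static SquashStatus
write_record (SquashFile* file,
              const uint8_t* hdr, size_t hdr_len,
              const uint8_t* body, size_t body_len) {
  SquashStatus res;

  squash_file_lock (file);
  res = squash_file_write_unlocked (file, hdr_len, hdr);
  if (res == SQUASH_OK)
    res = squash_file_write_unlocked (file, body_len, body);
  squash_file_unlock (file);

  return res;
}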
Example #23
static int
uhid_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
    int fflags)
{
	struct uhid_softc *sc = usb_fifo_softc(fifo);
	struct usb_gen_descriptor *ugd;
	uint32_t size;
	int error = 0;
	uint8_t id;

	switch (cmd) {
	case USB_GET_REPORT_DESC:
		ugd = addr;
		if (sc->sc_repdesc_size > ugd->ugd_maxlen) {
			size = ugd->ugd_maxlen;
		} else {
			size = sc->sc_repdesc_size;
		}
		ugd->ugd_actlen = size;
		if (ugd->ugd_data == NULL)
			break;		/* descriptor length only */
		error = copyout(sc->sc_repdesc_ptr, ugd->ugd_data, size);
		break;

	case USB_SET_IMMED:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		if (*(int *)addr) {

			/* do a test read */

			error = uhid_get_report(sc, UHID_INPUT_REPORT,
			    sc->sc_iid, NULL, NULL, sc->sc_isize);
			if (error) {
				break;
			}
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags |= UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		} else {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~UHID_FLAG_IMMED;
			mtx_unlock(&sc->sc_mtx);
		}
		break;

	case USB_GET_REPORT:
		if (!(fflags & FREAD)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		if (id != 0)
			copyin(ugd->ugd_data, &id, 1);
		error = uhid_get_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_SET_REPORT:
		if (!(fflags & FWRITE)) {
			error = EPERM;
			break;
		}
		ugd = addr;
		switch (ugd->ugd_report_type) {
		case UHID_INPUT_REPORT:
			size = sc->sc_isize;
			id = sc->sc_iid;
			break;
		case UHID_OUTPUT_REPORT:
			size = sc->sc_osize;
			id = sc->sc_oid;
			break;
		case UHID_FEATURE_REPORT:
			size = sc->sc_fsize;
			id = sc->sc_fid;
			break;
		default:
			return (EINVAL);
		}
		if (id != 0)
			copyin(ugd->ugd_data, &id, 1);
		error = uhid_set_report(sc, ugd->ugd_report_type, id,
		    NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
		break;

	case USB_GET_REPORT_ID:
		*(int *)addr = 0;	/* XXX: we only support reportid 0? */
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
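From userland, the USB_GET_REPORT_DESC branch above is reached with a plain ioctl(2). A sketch (the device path is illustrative):

#include <sys/ioctl.h>
#include <dev/usb/usb_ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct usb_gen_descriptor ugd;
	unsigned char buf[1024];
	int fd;

	fd = open("/dev/uhid0", O_RDWR);
	if (fd < 0)
		return (1);
	memset(&ugd, 0, sizeof(ugd));
	ugd.ugd_data = buf;
	ugd.ugd_maxlen = sizeof(buf);	/* kernel clamps to descriptor size */
	if (ioctl(fd, USB_GET_REPORT_DESC, &ugd) == 0)
		printf("report descriptor: %u bytes\n",
		    (unsigned)ugd.ugd_actlen);
	close(fd);
	return (0);
}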
Example #24
static void
ubt_task(void *context, int pending)
{
	ubt_softc_p	sc = context;
	int		task_flags, i;

	UBT_NG_LOCK(sc);
	task_flags = sc->sc_task_flags;
	sc->sc_task_flags = 0;
	UBT_NG_UNLOCK(sc);

	/*
	 * Stop all USB transfers synchronously.
	 * Stop interface #0 and #1 transfers at the same time and in the
	 * same loop. usbd_transfer_drain() will do appropriate locking.
	 */

	if (task_flags & UBT_FLAG_T_STOP_ALL)
		for (i = 0; i < UBT_N_TRANSFER; i ++)
			usbd_transfer_drain(sc->sc_xfer[i]);

	/* Start incoming interrupt and bulk, and all isoc. USB transfers */
	if (task_flags & UBT_FLAG_T_START_ALL) {
		/*
		 * Interface #0
		 */

		mtx_lock(&sc->sc_if_mtx);

		ubt_xfer_start(sc, UBT_IF_0_INTR_DT_RD);
		ubt_xfer_start(sc, UBT_IF_0_BULK_DT_RD);

		/*
		 * Interface #1
		 * Start both read and write isoc. transfers by default.
		 * Get them going all the time even if we have nothing
		 * to send to avoid any delays.
		 */

		ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_RD1);
		ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_RD2);
		ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_WR1);
		ubt_xfer_start(sc, UBT_IF_1_ISOC_DT_WR2);

		mtx_unlock(&sc->sc_if_mtx);
	}

 	/* Start outgoing control transfer */
	if (task_flags & UBT_FLAG_T_START_CTRL) {
		mtx_lock(&sc->sc_if_mtx);
		ubt_xfer_start(sc, UBT_IF_0_CTRL_DT_WR);
		mtx_unlock(&sc->sc_if_mtx);
	}

	/* Start outgoing bulk transfer */
	if (task_flags & UBT_FLAG_T_START_BULK) {
		mtx_lock(&sc->sc_if_mtx);
		ubt_xfer_start(sc, UBT_IF_0_BULK_DT_WR);
		mtx_unlock(&sc->sc_if_mtx);
	}
} /* ubt_task */
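
The drain/start handoff above has a producer side: callers set flag bits under the netgraph lock and enqueue the task, so several requests can coalesce into a single ubt_task() run. A hedged sketch of that side follows; the sc_task field name and the use of taskqueue_swi are assumptions modeled on taskqueue(9), not taken from the fragment.

static void
ubt_task_schedule_sketch(ubt_softc_p sc, int flags)
{
	UBT_NG_LOCK(sc);
	sc->sc_task_flags |= flags;	/* accumulate work for ubt_task() */
	UBT_NG_UNLOCK(sc);

	/* One enqueue serves any number of accumulated flag bits. */
	taskqueue_enqueue(taskqueue_swi, &sc->sc_task);	/* sc_task assumed */
}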
Exemple #25
0
/*
 * The caller must make sure the protocol and its functions correctly shut down
 * all sockets and release all locks and memory references.
 */
int
pf_proto_unregister(int family, int protocol, int type)
{
    struct domain *dp;
    struct protosw *pr, *dpr;

    /* Sanity checks. */
    if (family == 0)
        return (EPFNOSUPPORT);
    if (protocol == 0)
        return (EPROTONOSUPPORT);
    if (type == 0)
        return (EPROTOTYPE);

    /* Try to find the specified domain based on the family type. */
    for (dp = domains; dp; dp = dp->dom_next)
        if (dp->dom_family == family)
            goto found;
    return (EPFNOSUPPORT);

found:
    dpr = NULL;

    /* Lock out everyone else while we are manipulating the protosw. */
    mtx_lock(&dom_mtx);

    /* The protocol must exist, and must exist only once. */
    for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
        if ((pr->pr_type == type) && (pr->pr_protocol == protocol)) {
            if (dpr != NULL) {
                mtx_unlock(&dom_mtx);
                return (EMLINK);   /* Should not happen! */
            } else
                dpr = pr;
        }
    }

    /* Protocol does not exist. */
    if (dpr == NULL) {
        mtx_unlock(&dom_mtx);
        return (EPROTONOSUPPORT);
    }

    /* De-orbit the protocol and make the slot available again. */
    dpr->pr_type = 0;
    dpr->pr_domain = dp;
    dpr->pr_protocol = PROTO_SPACER;
    dpr->pr_flags = 0;
    dpr->pr_input = NULL;
    dpr->pr_output = NULL;
    dpr->pr_ctlinput = NULL;
    dpr->pr_ctloutput = NULL;
    dpr->pr_init = NULL;
    dpr->pr_fasttimo = NULL;
    dpr->pr_slowtimo = NULL;
    dpr->pr_drain = NULL;
    dpr->pr_usrreqs = &nousrreqs;

    /* Job is done, no more protection required. */
    mtx_unlock(&dom_mtx);

    return (0);
}
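
A hedged usage sketch: a protocol module's unload handler might release its protosw slot through pf_proto_unregister() like this. The divert-style PF_INET/IPPROTO_DIVERT/SOCK_RAW triple and the handler name are illustrative, not taken from the function above.

static int
div_modevent_sketch(module_t mod, int type, void *unused)
{
	int error = 0;

	switch (type) {
	case MOD_UNLOAD:
		/* Undo the matching pf_proto_register() from MOD_LOAD. */
		error = pf_proto_unregister(PF_INET, IPPROTO_DIVERT, SOCK_RAW);
		break;
	default:
		break;
	}
	return (error);
}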
Exemple #26
0
/*
 * Function to read the kvp request buffer from host
 * and interact with daemon
 */
static void
hv_kvp_process_request(void *context)
{
	uint8_t *kvp_buf;
	hv_vmbus_channel *channel = context;
	uint32_t recvlen = 0;
	uint64_t requestid;
	struct hv_vmbus_icmsg_hdr *icmsghdrp;
	int ret = 0;
	uint64_t pending_cnt = 1;
	
	hv_kvp_log_info("%s: entering hv_kvp_process_request\n", __func__);
	kvp_buf = receive_buffer[HV_KVP];
	ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
		&recvlen, &requestid);

	/*
	 * We start counting only after the daemon registers
	 * and therefore there could be requests pending in 
	 * the VMBus that are not reflected in pending_cnt.
	 * Therefore we continue reading as long as either of
	 * the below conditions is true.
	 */

	while ((pending_cnt > 0) || ((ret == 0) && (recvlen > 0))) {

		if ((ret == 0) && (recvlen > 0)) {
			
			icmsghdrp = (struct hv_vmbus_icmsg_hdr *)
					&kvp_buf[sizeof(struct hv_vmbus_pipe_hdr)];
	
			hv_kvp_transaction_init(recvlen, channel, requestid, kvp_buf);
			if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
				hv_kvp_negotiate_version(icmsghdrp, NULL, kvp_buf);
				hv_kvp_respond_host(ret);
					
				/*
				 * It is ok to not acquire the mutex before setting 
				 * req_in_progress here because negotiation is the
				 * first thing that happens and hence there is no
				 * chance of a race condition.
				 */
				
				kvp_globals.req_in_progress = false;
				hv_kvp_log_info("%s :version negotiated\n", __func__);

			} else {
				if (!kvp_globals.daemon_busy) {

					hv_kvp_log_info("%s: issuing qury to daemon\n", __func__);
					mtx_lock(&kvp_globals.pending_mutex);
					kvp_globals.req_timed_out = false;
					kvp_globals.daemon_busy = true;
					mtx_unlock(&kvp_globals.pending_mutex);

					hv_kvp_send_msg_to_daemon();
					hv_kvp_log_info("%s: waiting for daemon\n", __func__);
				}
				
				/* Wait 5 seconds for daemon to respond back */
				tsleep(&kvp_globals, 0, "kvpworkitem", 5 * hz);
				hv_kvp_log_info("%s: came out of wait\n", __func__);
			}
		}

		mtx_lock(&kvp_globals.pending_mutex);
		
		/*
		 * Notice that once req_timed_out is set to true
		 * it will remain true until the next request is
		 * sent to the daemon.  The response from the daemon
		 * is forwarded to the host only when this flag is
		 * false.
		 */
		kvp_globals.req_timed_out = true;

		/*
		 * Cancel request if so need be.
		 */
		if (hv_kvp_req_in_progress()) {
			hv_kvp_log_info("%s: request was still active after wait so failing\n", __func__);
			hv_kvp_respond_host(HV_KVP_E_FAIL);
			kvp_globals.req_in_progress = false;	
		}
	
		/*
		 * Decrement the pending request count.
		 */
		if (kvp_globals.pending_reqs > 0)
			kvp_globals.pending_reqs--;
		pending_cnt = kvp_globals.pending_reqs;
		
		mtx_unlock(&kvp_globals.pending_mutex);

		/*
		 * Try reading next buffer
		 */
		recvlen = 0;
		ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
			&recvlen, &requestid);
		hv_kvp_log_info("%s: read: context %p, pending_cnt %ju ret =%d, recvlen=%d\n",
			__func__, context, pending_cnt, ret, recvlen);
	} 
}
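
The tsleep() above implies a matching wakeup on &kvp_globals once the daemon's response arrives. A minimal sketch of that response path, assuming a hypothetical handler name; only the mutex, the flags and the wait channel are taken from the code above.

static void
hv_kvp_daemon_response_sketch(void)
{
	mtx_lock(&kvp_globals.pending_mutex);
	kvp_globals.daemon_busy = false;
	mtx_unlock(&kvp_globals.pending_mutex);

	/* Matches tsleep(&kvp_globals, ...) in hv_kvp_process_request(). */
	wakeup(&kvp_globals);
}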
Exemple #27
0
/**
 * Create a VdpBitmapSurface.
 */
VdpStatus
vlVdpBitmapSurfaceCreate(VdpDevice device,
                         VdpRGBAFormat rgba_format,
                         uint32_t width, uint32_t height,
                         VdpBool frequently_accessed,
                         VdpBitmapSurface *surface)
{
   struct pipe_context *pipe;
   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_templ;
   VdpStatus ret;

   vlVdpBitmapSurface *vlsurface = NULL;

   if (!(width && height))
      return VDP_STATUS_INVALID_SIZE;

   vlVdpDevice *dev = vlGetDataHTAB(device);
   if (!dev)
      return VDP_STATUS_INVALID_HANDLE;

   pipe = dev->context;
   if (!pipe)
      return VDP_STATUS_INVALID_HANDLE;

   if (!surface)
      return VDP_STATUS_INVALID_POINTER;

   vlsurface = CALLOC(1, sizeof(vlVdpBitmapSurface));
   if (!vlsurface)
      return VDP_STATUS_RESOURCES;

   DeviceReference(&vlsurface->device, dev);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = VdpFormatRGBAToPipe(rgba_format);
   res_tmpl.width0 = width;
   res_tmpl.height0 = height;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
   res_tmpl.usage = frequently_accessed ? PIPE_USAGE_DYNAMIC : PIPE_USAGE_DEFAULT;

   mtx_lock(&dev->mutex);

   if (!CheckSurfaceParams(pipe->screen, &res_tmpl)) {
      ret = VDP_STATUS_RESOURCES;
      goto err_unlock;
   }

   res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
   if (!res) {
      ret = VDP_STATUS_RESOURCES;
      goto err_unlock;
   }

   vlVdpDefaultSamplerViewTemplate(&sv_templ, res);
   vlsurface->sampler_view = pipe->create_sampler_view(pipe, res, &sv_templ);

   pipe_resource_reference(&res, NULL);

   if (!vlsurface->sampler_view) {
      ret = VDP_STATUS_RESOURCES;
      goto err_unlock;
   }

   mtx_unlock(&dev->mutex);

   *surface = vlAddDataHTAB(vlsurface);
   if (*surface == 0) {
      mtx_lock(&dev->mutex);
      ret = VDP_STATUS_ERROR;
      goto err_sampler;
   }

   return VDP_STATUS_OK;

err_sampler:
   pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
err_unlock:
   mtx_unlock(&dev->mutex);
   DeviceReference(&vlsurface->device, NULL);
   FREE(vlsurface);
   return ret;
}
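
Caller-side sketch: applications reach this function through the VDPAU entry point resolved with VdpGetProcAddress; the surface dimensions below are illustrative, and vdp_bitmap_surface_create is assumed to hold that resolved function pointer.

VdpBitmapSurface bitmap;
VdpStatus st;

st = vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8,
                               256, 256, VDP_FALSE, &bitmap);
if (st != VDP_STATUS_OK)
   handle_error(st);	/* hypothetical error handler */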
Exemple #28
0
/*
 * Stop the interface
 */
void
patm_stop(struct patm_softc *sc)
{
	u_int i;
	struct mbuf *m;
	struct patm_txmap *map;
	struct patm_scd *scd;

	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->utopia.flags |= UTP_FL_POLL_CARRIER;

	patm_reset(sc);

	mtx_lock(&sc->tst_lock);
	i = sc->tst_state;
	sc->tst_state = 0;
	callout_stop(&sc->tst_callout);
	mtx_unlock(&sc->tst_lock);

	if (i != 0) {
		/*
		 * This means we are just entering or leaving the timeout,
		 * so wait a little bit.  Doing this correctly would be
		 * more involved.
		 */
		DELAY(1000);
	}

	/*
	 * Give any waiters on closing a VCC a chance. They will stop
	 * waiting once they see that IFF_DRV_RUNNING has disappeared.
	 */
	cv_broadcast(&sc->vcc_cv);

	/* free large buffers */
	patm_debug(sc, ATTACH, "freeing large buffers...");
	for (i = 0; i < sc->lbuf_max; i++)
		if (sc->lbufs[i].m != NULL)
			patm_lbuf_free(sc, &sc->lbufs[i]);

	/* free small buffers that are on the card */
	patm_debug(sc, ATTACH, "freeing small buffers...");
	mbp_card_free(sc->sbuf_pool);

	/* free aal0 buffers that are on the card */
	patm_debug(sc, ATTACH, "freeing aal0 buffers...");
	mbp_card_free(sc->vbuf_pool);

	/* free partial receive chains and reset VCC state */
	for (i = 0; i < sc->mmap->max_conn; i++) {
		if (sc->vccs[i] != NULL) {
			if (sc->vccs[i]->chain != NULL) {
				m_freem(sc->vccs[i]->chain);
				sc->vccs[i]->chain = NULL;
				sc->vccs[i]->last = NULL;
			}

			if (sc->vccs[i]->vflags & (PATM_VCC_RX_CLOSING |
			    PATM_VCC_TX_CLOSING)) {
				uma_zfree(sc->vcc_zone, sc->vccs[i]);
				sc->vccs[i] = NULL;
			} else {
				/* keep */
				sc->vccs[i]->vflags &= ~PATM_VCC_OPEN;
				sc->vccs[i]->cps = 0;
				sc->vccs[i]->scd = NULL;
			}
		}
	}

	/* stop all active SCDs */
	while ((scd = LIST_FIRST(&sc->scd_list)) != NULL) {
		/* free queue packets */
		for (;;) {
			_IF_DEQUEUE(&scd->q, m);
			if (m == NULL)
				break;
			m_freem(m);
		}

		/* free transmitting packets */
		for (i = 0; i < IDT_TSQE_TAG_SPACE; i++) {
			if ((m = scd->on_card[i]) != NULL) {
				scd->on_card[i] = 0;
				map = m->m_pkthdr.header;

				bus_dmamap_unload(sc->tx_tag, map->map);
				SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
				m_freem(m);
			}
		}
		patm_scd_free(sc, scd);
	}
	sc->scd0 = NULL;

	sc->flags &= ~PATM_CLR;

	/* reset raw cell queue */
	sc->rawh = NULL;

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);
}
int
patm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct patm_softc *sc = ifp->if_softc;
	int error = 0;
	uint32_t cfg;
	struct atmio_vcctable *vtab;

	switch (cmd) {

	  case SIOCSIFADDR:
		mtx_lock(&sc->mtx);
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			patm_initialize(sc);
		switch (ifa->ifa_addr->sa_family) {

#ifdef INET
		  case AF_INET:
		  case AF_INET6:
			ifa->ifa_rtrequest = atm_rtrequest;
			break;
#endif
		  default:
			break;
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCSIFFLAGS:
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				patm_initialize(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				patm_stop(sc);
			}
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCGIFMEDIA:
	  case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);

		/*
		 * We need to toggle unassigned/idle cells ourselves because
		 * the 77252 generates null cells for spacing.  When switching
		 * null cells off, it gets the timing wrong.
		 */
		mtx_lock(&sc->mtx);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (sc->utopia.state & UTP_ST_UNASS) {
				if (!(sc->flags & PATM_UNASS)) {
					cfg = patm_nor_read(sc, IDT_NOR_CFG);
					cfg &= ~IDT_CFG_IDLECLP;
					patm_nor_write(sc, IDT_NOR_CFG, cfg);
					sc->flags |= PATM_UNASS;
				}
			} else {
				if (sc->flags & PATM_UNASS) {
					cfg = patm_nor_read(sc, IDT_NOR_CFG);
					cfg |= IDT_CFG_IDLECLP;
					patm_nor_write(sc, IDT_NOR_CFG, cfg);
					sc->flags &= ~PATM_UNASS;
				}
			}
		} else {
			if (sc->utopia.state & UTP_ST_UNASS)
				sc->flags |= PATM_UNASS;
			else
				sc->flags &= ~PATM_UNASS;
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		if (ifr->ifr_mtu > ATMMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	  case SIOCATMOPENVCC:		/* kernel internal use */
		error = patm_open_vcc(sc, (struct atmio_openvcc *)data);
		break;

	  case SIOCATMCLOSEVCC:		/* kernel internal use */
		error = patm_close_vcc(sc, (struct atmio_closevcc *)data);
		break;

	  case SIOCATMGVCCS:	/* external use */
#ifdef CPU_CHERI
#error Unvalidatable ifr_data use.  Unsafe with CheriABI.
#endif
		/* return vcc table */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    sc->mmap->max_conn, sc->vccs_open, &sc->mtx, 1);
		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
		    vtab->count * sizeof(vtab->vccs[0]));
		free(vtab, M_DEVBUF);
		break;

	  case SIOCATMGETVCCS:	/* netgraph internal use */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    sc->mmap->max_conn, sc->vccs_open, &sc->mtx, 0);
		if (vtab == NULL) {
			error = ENOMEM;
			break;
		}
		*(void **)data = vtab;
		break;

	  default:
		patm_debug(sc, IOCTL, "unknown cmd=%08lx arg=%p", cmd, data);
		error = EINVAL;
		break;
	}

	return (error);
}
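
For reference, the SIOCSIFMTU case above is driven from userland roughly as follows. A hedged sketch: the interface name is assumed, and 9180 is the classical ATM default MTU (ATMMTU), the largest value the handler accepts.

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

static void
set_patm_mtu_sketch(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "patm0", sizeof(ifr.ifr_name)); /* assumed unit */
	ifr.ifr_mtu = 9180;	/* ATMMTU; larger values fail with EINVAL */
	if (ioctl(s, SIOCSIFMTU, &ifr) == -1)
		err(1, "SIOCSIFMTU");
	close(s);
}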
Exemple #30
0
static void
g_uzip_done(struct bio *bp)
{
	z_stream zs;
	struct bio *bp2;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_uzip_softc *sc;
	char *data, *data2;
	off_t ofs;
	size_t blk, blkofs, len, ulen;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;
	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/* Make sure there's forward progress. */
	if (bp->bio_completed == 0) {
		bp2->bio_error = ECANCELED;
		goto done;
	}

	zs.zalloc = z_alloc;
	zs.zfree = z_free;
	if (inflateInit(&zs) != Z_OK) {
		bp2->bio_error = EILSEQ;
		goto done;
	}

	ofs = bp2->bio_offset + bp2->bio_completed;
	blk = ofs / sc->blksz;
	blkofs = ofs % sc->blksz;
	data = bp->bio_data + sc->offsets[blk] % pp->sectorsize;
	data2 = bp2->bio_data + bp2->bio_completed;
	while (bp->bio_completed && bp2->bio_resid) {
		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
		len = sc->offsets[blk + 1] - sc->offsets[blk];
		DPRINTF(("%s/%s: %p/%ju: data2=%p, ulen=%u, data=%p, len=%u\n",
		    __func__, gp->name, gp, bp->bio_completed,
		    data2, (u_int)ulen, data, (u_int)len));
		if (len == 0) {
			/* All zero block: no cache update */
			bzero(data2, ulen);
		} else if (len <= bp->bio_completed) {
			zs.next_in = data;
			zs.avail_in = len;
			zs.next_out = sc->last_buf;
			zs.avail_out = sc->blksz;
			mtx_lock(&sc->last_mtx);
			if (inflate(&zs, Z_FINISH) != Z_STREAM_END) {
				sc->last_blk = -1;
				mtx_unlock(&sc->last_mtx);
				inflateEnd(&zs);
				bp2->bio_error = EILSEQ;
				goto done;
			}
			sc->last_blk = blk;
			memcpy(data2, sc->last_buf + blkofs, ulen);
			mtx_unlock(&sc->last_mtx);
			if (inflateReset(&zs) != Z_OK) {
				inflateEnd(&zs);
				bp2->bio_error = EILSEQ;
				goto done;
			}
			data += len;
		} else
			break;

		data2 += ulen;
		bp2->bio_completed += ulen;
		bp2->bio_resid -= ulen;
		bp->bio_completed -= len;
		blkofs = 0;
		blk++;
	}

	if (inflateEnd(&zs) != Z_OK)
		bp2->bio_error = EILSEQ;

done:
	/* Finish processing the request. */
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
		g_io_deliver(bp2, bp2->bio_error);
	else
		g_uzip_request(gp, bp2);
}
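
The loop above inflates each cluster with Z_FINISH and then calls inflateReset() rather than tearing the stream down between clusters. A standalone sketch of that zlib pattern, under the assumption of independently deflated clusters whose byte extents are given by an offsets table as in the code above:

#include <string.h>
#include <zlib.h>

/* Inflate nblk clusters of blksz bytes each, reusing a single z_stream. */
static int
inflate_clusters(unsigned char *in, const size_t *offs, int nblk,
    unsigned char *out, size_t blksz)
{
	z_stream zs;
	int i;

	memset(&zs, 0, sizeof(zs));	/* default zalloc/zfree */
	if (inflateInit(&zs) != Z_OK)
		return (-1);
	for (i = 0; i < nblk; i++) {
		zs.next_in = in + offs[i];
		zs.avail_in = (uInt)(offs[i + 1] - offs[i]);
		zs.next_out = out + (size_t)i * blksz;
		zs.avail_out = (uInt)blksz;
		/* Each cluster is a complete deflate stream: finish, reset. */
		if (inflate(&zs, Z_FINISH) != Z_STREAM_END ||
		    inflateReset(&zs) != Z_OK) {
			inflateEnd(&zs);
			return (-1);
		}
	}
	return (inflateEnd(&zs) == Z_OK ? 0 : -1);
}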