Example #1
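/*
 * Process exit path (apparently from a UZI/FUZIX-style kernel): block
 * further signals, free the process's memory and open files, reparent
 * children to init, signal the parent and switch to the next runnable
 * process.
 */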
void doexit(uint16_t val)
{
	int16_t j;
	ptptr p;
	irqflags_t irq;

#ifdef DEBUG
	kprintf("process %d exiting %d\n", udata.u_ptab->p_pid, val);

	kprintf
	    ("udata.u_page %u, udata.u_ptab %p, udata.u_ptab->p_page %u\n",
	     udata.u_page, udata.u_ptab, udata.u_ptab->p_page);
#endif
	if (udata.u_ptab->p_pid == 1)
		panic(PANIC_KILLED_INIT);

	sync();		/* Not necessary, but a good idea. */

	irq = di();

	/* We are exiting, hold all signals  (they will never be
	   delivered). If we don't do this we might take a signal
	   while exiting which would be ... unfortunate */
	udata.u_ptab->p_held = 0xFFFFFFFFUL;
	udata.u_cursig = 0;

	/* Discard our memory before we blow away and reuse the memory */
	pagemap_free(udata.u_ptab);

	for (j = 0; j < UFTSIZE; ++j) {
		if (udata.u_files[j] != NO_FILE)
			doclose(j);
	}

	udata.u_ptab->p_exitval = val;

	i_deref(udata.u_cwd);
	i_deref(udata.u_root);

	/* Stash away child's execution tick counts in process table,
	 * overlaying some no longer necessary stuff.
	 *
	 * Pedantically POSIX says we should do this at the point of wait()
	 */
	udata.u_utime += udata.u_cutime;
	udata.u_stime += udata.u_cstime;
	memcpy(&(udata.u_ptab->p_priority), &udata.u_utime,
	       2 * sizeof(clock_t));

	for (p = ptab; p < ptab_end; ++p) {
		if (p->p_status == P_EMPTY || p == udata.u_ptab)
			continue;
		/* Set any child's parents to init */
		if (p->p_pptr == udata.u_ptab) {
			p->p_pptr = ptab;	/* ptab is always init */
			/* Suppose our child is a zombie and init has
			   SIGCLD blocked */
			if (ptab[0].p_ignored & (1UL << SIGCHLD)) {
				p->p_status = P_EMPTY;
			} else {
				ssig(&ptab[0], SIGCHLD);
				wakeup(&ptab[0]);
			}
		}
		/* Send SIGHUP to any pgrp members and remove
		   them from our pgrp */
		if (p->p_pgrp == udata.u_ptab->p_pid) {
			p->p_pgrp = 0;
			ssig(p, SIGHUP);
			ssig(p, SIGCONT);
		}
	}
	tty_exit();
	irqrestore(irq);
#ifdef DEBUG
	kprintf
	    ("udata.u_page %u, udata.u_ptab %p, udata.u_ptab->p_page %u\n",
	     udata.u_page, udata.u_ptab, udata.u_ptab->p_page);
#endif
#ifdef CONFIG_ACCT
	acctexit(p);
#endif
	udata.u_page = 0xFFFFU;
	udata.u_page2 = 0xFFFFU;
	signal_parent(udata.u_ptab->p_pptr);
	nready--;
	nproc--;

	switchin(getproc());
	panic(PANIC_DOEXIT);
}
Example #2
void QtTrader::statusMessage (QString d)
{
  // update the status bar with a new message from somewhere
  statusBar()->showMessage(d, 0); // timeout 0: keep the message until it is replaced
  wakeup();
}
Example #3
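/*
 * Interrupt handler for a Tegra I2C controller (a FreeBSD-style driver):
 * dispatch on the interrupt source register, service RX/TX FIFO data
 * requests, latch bus errors, and wake the waiting transfer on
 * completion.
 */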
static void
tegra_i2c_intr(void *arg)
{
    struct tegra_i2c_softc *sc;
    uint32_t status, reg;
    int rv;

    sc = (struct tegra_i2c_softc *)arg;

    LOCK(sc);
    status = RD4(sc, I2C_INTERRUPT_SOURCE_REGISTER);
    if (sc->msg == NULL) {
        /* Unexpected interrupt - disable FIFOs, clear reset. */
        reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
        reg &= ~I2C_INT_TFIFO_DATA_REQ;
        reg &= ~I2C_INT_RFIFO_DATA_REQ;
        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0);
        WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status);
        UNLOCK(sc);
        return;
    }

    if ((status & I2C_ERROR_MASK) != 0) {
        if (status & I2C_INT_NOACK)
            sc->bus_err = IIC_ENOACK;
        if (status & I2C_INT_ARB_LOST)
            sc->bus_err = IIC_EBUSERR;
        if ((status & I2C_INT_TFIFO_OVR) ||
                (status & I2C_INT_RFIFO_UNF))
            sc->bus_err = IIC_EBUSERR;
        sc->done = 1;
    } else if ((status & I2C_INT_RFIFO_DATA_REQ) &&
               (sc->msg != NULL) && (sc->msg->flags & IIC_M_RD)) {
        rv = tegra_i2c_rx(sc);
        if (rv == 0) {
            reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
            reg &= ~I2C_INT_RFIFO_DATA_REQ;
            WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
        }
    } else if ((status & I2C_INT_TFIFO_DATA_REQ) &&
               (sc->msg != NULL) && !(sc->msg->flags & IIC_M_RD)) {
        rv = tegra_i2c_tx(sc);
        if (rv == 0) {
            reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
            reg &= ~I2C_INT_TFIFO_DATA_REQ;
            WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
        }
    } else if ((status & I2C_INT_RFIFO_DATA_REQ) ||
               (status & I2C_INT_TFIFO_DATA_REQ)) {
        device_printf(sc->dev, "Unexpected data interrupt: 0x%08X\n",
                      status);
        reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
        reg &= ~I2C_INT_TFIFO_DATA_REQ;
        reg &= ~I2C_INT_RFIFO_DATA_REQ;
        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
    }
    if (status & I2C_INT_PACKET_XFER_COMPLETE)
        sc->done = 1;
    WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status);
    if (sc->done) {
        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0);
        wakeup(&(sc->done));
    }
    UNLOCK(sc);
}
Example #4
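/*
 * Interrupt handler for a VAX console storage (crl) device: a state
 * machine keyed on the STXCS status byte that transfers one word per
 * interrupt and wakes the sleeping buffer owner on completion.
 */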
void
crlintr(void *arg)
{
	struct buf *bp;
	int i;

	bp = crltab.crl_buf;
	i = mfpr(PR_STXCS);
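	/* the transfer-status code is reported in the top byte of STXCS */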
	switch ((i>>24) & 0xFF) {

	case CRL_S_XCMPLT:
		switch (crltab.crl_active) {

		case CRL_F_RETSTS:
			{
				char sbuf[256], sbuf2[256];

				crlstat.crl_ds = mfpr(PR_STXDB);

				snprintb(sbuf, sizeof(sbuf), CRLCS_BITS,
				    crlstat.crl_cs);
				snprintb(sbuf2, sizeof(sbuf2), CRLDS_BITS,
				    crlstat.crl_ds);
				printf("crlcs=0x%s, crlds=0x%s\n", sbuf, sbuf2);
				break;
			}

		case CRL_F_READ:
		case CRL_F_WRITE:
			bp->b_oflags |= BO_DONE;
		}
		crltab.crl_active = 0;
		wakeup(bp);
		break;

	case CRL_S_XCONT:
		switch (crltab.crl_active) {

		case CRL_F_WRITE:
			mtpr(*crltab.crl_xaddr++, PR_STXDB);
			mtpr(bp->b_blkno<<8 | STXCS_IE | CRL_F_WRITE, PR_STXCS);
			break;

		case CRL_F_READ:
			*crltab.crl_xaddr++ = mfpr(PR_STXDB);
			mtpr(bp->b_blkno<<8 | STXCS_IE | CRL_F_READ, PR_STXCS);
		}
		break;

	case CRL_S_ABORT:
		crltab.crl_active = CRL_F_RETSTS;
		mtpr(STXCS_IE | CRL_F_RETSTS, PR_STXCS);
		bp->b_oflags |= BO_DONE;
		bp->b_error = EIO;
		break;

	case CRL_S_RETSTS:
		crlstat.crl_cs = mfpr(PR_STXDB);
		mtpr(STXCS_IE | CRL_S_RETSTS, PR_STXCS);
		break;

	case CRL_S_HNDSHK:
		printf("crl: hndshk error\n");	/* dump out some status too? */
		crltab.crl_active = 0;
		bp->b_oflags |= BO_DONE;
		bp->b_error = EIO;
		cv_broadcast(&bp->b_done);
		break;

	case CRL_S_HWERR:
		printf("crl: hard error sn%" PRId64 "\n", bp->b_blkno);
		crltab.crl_active = CRL_F_ABORT;
		mtpr(STXCS_IE | CRL_F_ABORT, PR_STXCS);
		break;
	}
}
Example #5
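/*
 * Start routine for a CAM "pt" (processor target) peripheral: pull the
 * next buf off the queue, build a SCSI send/receive CCB for it, and
 * keep the peripheral scheduled while more work is pending.
 */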
static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pt_softc *softc;
	struct buf *bp;
	int s;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * See if there is a buf with work for us to do..
	 */
	s = splbio();
	bp = bufq_first(&softc->buf_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
				("queuing for immediate ccb\n"));
		start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (bp == NULL) {
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		int oldspl;

		bufq_remove(&softc->buf_queue, bp);

		devstat_start_transaction(&softc->device_stats);

		scsi_send_receive(&start_ccb->csio,
				  /*retries*/4,
				  ptdone,
				  MSG_SIMPLE_Q_TAG,
				  bp->b_flags & B_READ,
				  /*byte2*/0,
				  bp->b_bcount,
				  bp->b_data,
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/softc->io_timeout);

		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO;

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
				 periph_links.le);
		splx(oldspl);

		start_ccb->ccb_h.ccb_bp = bp;
		bp = bufq_first(&softc->buf_queue);
		splx(s);

		xpt_action(start_ccb);
		
		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
	}
}
Example #6
/*
 * Retrieve a pointer for the ALQ to write directly into, avoiding bcopy.
 */
struct ale *
alq_getn(struct alq *alq, int len, int flags)
{
	int contigbytes;
	void *waitchan;

	KASSERT((len > 0 && len <= alq->aq_buflen),
	    ("%s: len <= 0 || len > alq->aq_buflen", __func__));

	waitchan = NULL;

	ALQ_LOCK(alq);

	/*
	 * Determine the number of free contiguous bytes.
	 * We ensure elsewhere that if aq_writehead == aq_writetail because
	 * the buffer is empty, they will both be set to 0 and therefore
	 * aq_freebytes == aq_buflen and is fully contiguous.
	 * If they are equal and the buffer is not empty, aq_freebytes will
	 * be 0 indicating the buffer is full.
	 */
	if (alq->aq_writehead <= alq->aq_writetail)
		contigbytes = alq->aq_freebytes;
	else {
		contigbytes = alq->aq_buflen - alq->aq_writehead;

		if (contigbytes < len) {
			/*
			 * Insufficient space at end of buffer to handle a
			 * contiguous write. Wrap early if there's space at
			 * the beginning. This will leave a hole at the end
			 * of the buffer which we will have to skip over when
			 * flushing the buffer to disk.
			 */
			if (alq->aq_writetail >= len || flags & ALQ_WAITOK) {
				/* Keep track of # bytes left blank. */
				alq->aq_wrapearly = contigbytes;
				/* Do the wrap and adjust counters. */
				contigbytes = alq->aq_freebytes =
				    alq->aq_writetail;
				alq->aq_writehead = 0;
			}
		}
	}

	/*
	 * Return a NULL ALE if:
	 * - The message is larger than our underlying buffer.
	 * - The ALQ is being shutdown.
	 * - There is insufficient free space in our underlying buffer
	 *   to accept the message and the user can't wait for space.
	 * - There is insufficient free space in our underlying buffer
	 *   to accept the message and the alq is inactive due to prior
	 *   use of the ALQ_NOACTIVATE flag (which would lead to deadlock).
	 */
	if (len > alq->aq_buflen ||
	    alq->aq_flags & AQ_SHUTDOWN ||
	    (((flags & ALQ_NOWAIT) || (!(alq->aq_flags & AQ_ACTIVE) &&
	    HAS_PENDING_DATA(alq))) && contigbytes < len)) {
		ALQ_UNLOCK(alq);
		return (NULL);
	}

	/*
	 * If we want ordered writes and there is already at least one thread
	 * waiting for resources to become available, sleep until we're woken.
	 */
	if (alq->aq_flags & AQ_ORDERED && alq->aq_waiters > 0) {
		KASSERT(!(flags & ALQ_NOWAIT),
		    ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
		alq->aq_waiters++;
		msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqgnord", 0);
		alq->aq_waiters--;
	}

	/*
	 * (ALQ_WAITOK && contigbytes < len) or contigbytes >= len, either enter
	 * while loop and sleep until we have enough contiguous free bytes
	 * (former) or skip (latter). If AQ_ORDERED is set, only 1 thread at a
	 * time will be in this loop. Otherwise, multiple threads may be
	 * sleeping here competing for ALQ resources.
	 */
	while (contigbytes < len && !(alq->aq_flags & AQ_SHUTDOWN)) {
		KASSERT(!(flags & ALQ_NOWAIT),
		    ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
		alq->aq_flags |= AQ_WANTED;
		alq->aq_waiters++;
		if (waitchan)
			wakeup(waitchan);
		msleep_spin(alq, &alq->aq_mtx, "alqgnres", 0);
		alq->aq_waiters--;

		if (alq->aq_writehead <= alq->aq_writetail)
			contigbytes = alq->aq_freebytes;
		else
			contigbytes = alq->aq_buflen - alq->aq_writehead;

		/*
		 * If we're the first thread to wake after an AQ_WANTED wakeup
		 * but there isn't enough free space for us, we're going to loop
		 * and sleep again. If there are other threads waiting in this
		 * loop, schedule a wakeup so that they can see if the space
		 * they require is available.
		 */
		if (alq->aq_waiters > 0 && !(alq->aq_flags & AQ_ORDERED) &&
		    contigbytes < len && !(alq->aq_flags & AQ_WANTED))
			waitchan = alq;
		else
			waitchan = NULL;
	}

	/*
	 * If there are waiters, we need to signal the waiting threads after we
	 * complete our work. The alq ptr is used as a wait channel for threads
	 * requiring resources to be freed up. In the AQ_ORDERED case, threads
	 * are not allowed to concurrently compete for resources in the above
	 * while loop, so we use a different wait channel in this case.
	 */
	if (alq->aq_waiters > 0) {
		if (alq->aq_flags & AQ_ORDERED)
			waitchan = &alq->aq_waiters;
		else
			waitchan = alq;
	} else
		waitchan = NULL;

	/* Bail if we're shutting down. */
	if (alq->aq_flags & AQ_SHUTDOWN) {
		ALQ_UNLOCK(alq);
		if (waitchan != NULL)
			wakeup_one(waitchan);
		return (NULL);
	}

	/*
	 * If we are here, we have a contiguous number of bytes >= len
	 * available in our buffer starting at aq_writehead.
	 */
	alq->aq_getpost.ae_data = alq->aq_entbuf + alq->aq_writehead;
	alq->aq_getpost.ae_bytesused = len;

	return (&alq->aq_getpost);
}
Example #7
/*
 * Interrupt handler. This handler is bus-independent. Note that our
 * interrupt may be shared, so we must handle "false" interrupts.
 */
void
ichsmb_device_intr(void *cookie)
{
	const sc_p sc = cookie;
	const device_t dev = sc->dev;
	const int maxloops = 16;
	u_int8_t status;
	u_int8_t ok_bits;
	int cmd_index;
	int count;

	lockmgr(&sc->mutex, LK_EXCLUSIVE);
	for (count = 0; count < maxloops; count++) {

		/* Get and reset status bits */
		status = bus_read_1(sc->io_res, ICH_HST_STA);
#if ICHSMB_DEBUG
		if ((status & ~(ICH_HST_STA_INUSE_STS | ICH_HST_STA_HOST_BUSY))
		    || count > 0) {
			DBG("%d stat=0x%02x\n", count, status);
		}
#endif
		status &= ~(ICH_HST_STA_INUSE_STS | ICH_HST_STA_HOST_BUSY);
		if (status == 0)
			break;

		/* Check for unexpected interrupt */
		ok_bits = ICH_HST_STA_SMBALERT_STS;
		cmd_index = sc->ich_cmd >> 2;
		if (sc->ich_cmd != -1) {
			KASSERT(cmd_index < sizeof(ichsmb_state_irqs),
			    ("%s: ich_cmd=%d", device_get_nameunit(dev),
			    sc->ich_cmd));
			ok_bits |= ichsmb_state_irqs[cmd_index];
		}
		if ((status & ~ok_bits) != 0) {
			device_printf(dev, "irq 0x%02x during %d\n", status,
			    cmd_index);
			bus_write_1(sc->io_res,
			    ICH_HST_STA, (status & ~ok_bits));
			continue;
		}

		/* Handle SMBALERT interrupt */
		if (status & ICH_HST_STA_SMBALERT_STS) {
			static int smbalert_count = 16;
			if (smbalert_count > 0) {
				device_printf(dev, "SMBALERT# rec'd\n");
				if (--smbalert_count == 0) {
					device_printf(dev,
					    "not logging anymore\n");
				}
			}
		}

		/* Check for bus error */
		if (status & ICH_HST_STA_BUS_ERR) {
			sc->smb_error = SMB_ECOLLI;	/* XXX SMB_EBUSERR? */
			goto finished;
		}

		/* Check for device error */
		if (status & ICH_HST_STA_DEV_ERR) {
			sc->smb_error = SMB_ENOACK;	/* or SMB_ETIMEOUT? */
			goto finished;
		}

		/* Check for byte completion in block transfer */
		if (status & ICH_HST_STA_BYTE_DONE_STS) {
			if (sc->block_write) {
				if (sc->block_index < sc->block_count) {

					/* Write next byte */
					bus_write_1(sc->io_res,
					    ICH_BLOCK_DB,
					    sc->block_data[sc->block_index++]);
				}
			} else {

				/* First interrupt, get the count also */
				if (sc->block_index == 0) {
					sc->block_count = bus_read_1(
					    sc->io_res, ICH_D0);
				}

				/* Get next byte, if any */
				if (sc->block_index < sc->block_count) {

					/* Read next byte */
					sc->block_data[sc->block_index++] =
					    bus_read_1(sc->io_res,
					      ICH_BLOCK_DB);

					/* Set "LAST_BYTE" bit before reading
					   the last byte of block data */
					if (sc->block_index
					    >= sc->block_count - 1) {
						bus_write_1(sc->io_res,
						    ICH_HST_CNT,
						    ICH_HST_CNT_LAST_BYTE
							| ICH_HST_CNT_INTREN
							| sc->ich_cmd);
					}
				}
			}
		}

		/* Check command completion */
		if (status & ICH_HST_STA_INTR) {
			sc->smb_error = SMB_ENOERR;
finished:
			sc->ich_cmd = -1;
			bus_write_1(sc->io_res,
			    ICH_HST_STA, status);
			wakeup(sc);
			break;
		}

		/* Clear status bits and try again */
		bus_write_1(sc->io_res, ICH_HST_STA, status);
	}
	lockmgr(&sc->mutex, LK_RELEASE);

	/* Too many loops? */
	if (count == maxloops) {
		device_printf(dev, "interrupt loop, status=0x%02x\n",
		    bus_read_1(sc->io_res, ICH_HST_STA));
	}
}
Example #8
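/*
 * IGMP input path (Plan 9-style networking code): validate the packet,
 * then either schedule membership reports for this medium (query) or
 * drop a group that another host has already reported (report).
 */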
void
igmpiput(Media *m, Ipifc *, Block *bp)
{
	int n;
	IGMPpkt *ghp;
	Ipaddr group;
	IGMPrep *rp, **lrp;
	Multicast *mp, **lmp;

	ghp = (IGMPpkt*)(bp->rp);
	netlog(Logigmp, "igmpiput: %d %I\n", ghp->vertype, ghp->group);

	n = blocklen(bp);
	if(n < IGMP_IPHDRSIZE+IGMP_HDRSIZE){
		netlog(Logigmp, "igmpiput: bad len\n");
		goto error;
	}
	if((ghp->vertype>>4) != 1){
		netlog(Logigmp, "igmpiput: bad igmp type\n");
		goto error;
	}
	if(ptclcsum(bp, IGMP_IPHDRSIZE, IGMP_HDRSIZE)){
		netlog(Logigmp, "igmpiput: checksum error %I\n", ghp->src);
		goto error;
	}

	group = nhgetl(ghp->group);
	
	lock(&igmpalloc);
	switch(ghp->vertype & 0xf){
	case IGMPquery:
		/*
		 *  start reporting groups that we're a member of.
		 */
		stats.inqueries++;
		for(rp = igmpalloc.reports; rp; rp = rp->next)
			if(rp->m == m)
				break;
		if(rp != nil)
			break;	/* already reporting */

		mp = Mediacopymulti(m);
		if(mp == nil)
			break;

		rp = malloc(sizeof(*rp));
		if(rp == nil)
			break;

		rp->m = m;
		rp->multi = mp;
		rp->ticks = 0;
		for(; mp; mp = mp->next)
			mp->timeout = nrand(MAXTIMEOUT);
		rp->next = igmpalloc.reports;
		igmpalloc.reports = rp;

		wakeup(&igmpalloc.r);

		break;
	case IGMPreport:
		/*
		 *  find report list for this medium
		 */
		stats.inreports++;
		lrp = &igmpalloc.reports;
		for(rp = *lrp; rp; rp = *lrp){
			if(rp->m == m)
				break;
			lrp = &rp->next;
		}
		if(rp == nil)
			break;

		/*
		 *  if someone else has reported a group,
		 *  we don't have to.
		 */
		lmp = &rp->multi;
		for(mp = *lmp; mp; mp = *lmp){
			if(mp->addr == group){
				*lmp = mp->next;
				free(mp);
				break;
			}
			lmp = &mp->next;
		}

		break;
	}
	unlock(&igmpalloc);

error:
	freeb(bp);
}
Example #9
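/*
 * flock() implementation (UZI/FUZIX-style kernel code): the inode's
 * CFLOCK bits hold a shared-lock count or an exclusive marker; callers
 * sleep on c_flags until the requested lock can be granted.
 */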
arg_t _flock(void)
{
	inoptr ino;
	struct oft *o;
	staticfast uint8_t c;
	staticfast uint8_t lock;
	staticfast int self;

	lock = lockop & ~LOCK_NB;
	self = 0;

	if (lock > LOCK_UN) {
		udata.u_error = EINVAL;
		return -1;
	}

	if ((ino = getinode(file)) == NULLINODE)
		return -1;
	o = &of_tab[udata.u_files[file]];

	c = ino->c_flags & CFLOCK;

	/* Upgrades and downgrades. Check if we are in fact doing a no-op */
	if (o->o_access & O_FLOCK) {
		self = 1;
		/* Shared or exclusive to shared can't block and is easy */
		if (lock == LOCK_SH) {
			if (c == CFLEX)
				c = 1;
			goto done;
		}
		/* Exclusive to exclusive - no op */
		if (c == CFLEX && lock == LOCK_EX)
			return 0;
		/* Shared to exclusive - handle via the loop */
	}

	/* Unlock - drop the locks, mark us not a lock holder. Doesn't block */
	if (lockop == LOCK_UN) {
		o->o_access &= ~O_FLOCK;
		deflock(o);
		return 0;
	}

	do {
		/* Exclusive lock must have no holders */
		if (c == self && lock == LOCK_EX) {
			c = CFLEX;
			goto done;
		}
		if (c < CFMAX) {
			c++;
			goto done;
		}
		if (c == CFMAX) {
			udata.u_error = ENOLCK;
			return -1;
		}
		/* LOCK_NB is defined as O_NDELAY... */
		if (psleep_flags(&ino->c_flags, (lockop & LOCK_NB)))
			return -1;
		/* locks will hopefully have changed .. */
		c = ino->c_flags & CFLOCK;
	} while (1);

done:
	if (o->o_access & O_FLOCK)
		deflock(o);
	ino->c_flags &= ~CFLOCK;
	ino->c_flags |= c;
	o->o_access |= O_FLOCK;
	wakeup(&ino->c_flags);
	return 0;
}
Example #10
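/*
 * Pager kernel process (Plan 9/Inferno-style): sleep until pages are
 * needed, then walk the process table paging out segments, or kill a
 * process when no swap is configured.
 */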
static void
pager(void *junk)
{
	int i;
	Segment *s;
	Proc *p, *ep;

	if(waserror())
		panic("pager: os error");

	p = proctab(0);
	ep = &p[conf.nproc];

loop:
	up->psstate = "Idle";
	sleep(&swapalloc.r, needpages, 0);

	while(needpages(junk)) {

		if(swapimage.c) {
			p++;
			if(p >= ep)
				p = proctab(0);
	
			if(p->state == Dead || p->noswap)
				continue;

			if(!canqlock(&p->seglock))
				continue;		/* process changing its segments */

			for(i = 0; i < NSEG; i++) {
				if(!needpages(junk)){
					qunlock(&p->seglock);
					goto loop;
				}

				if(s = p->seg[i]) {
					switch(s->type&SG_TYPE) {
					default:
						break;
					case SG_TEXT:
						pageout(p, s);
						break;
					case SG_DATA:
					case SG_BSS:
					case SG_STACK:
					case SG_SHARED:
						up->psstate = "Pageout";
						pageout(p, s);
						if(ioptr != 0) {
							up->psstate = "I/O";
							executeio();
						}
						break;
					}
				}
			}
			qunlock(&p->seglock);
		}
		else {
			print("out of physical memory; no swap configured\n");
			if(!cpuserver)
				freebroken();	/* can use the memory */
			else
				killbig("out of memory");

			/* Emulate the old system if no swap channel */
			tsleep(&up->sleep, return0, 0, 5000);
			wakeup(&palloc.r);
		}
	}
	goto loop;
}
Example #11
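/*
 * Serial interrupt handler for a SAB 82532 UART ("sabtty"): drain the
 * receive FIFO into the ring buffer, refill the transmit FIFO, and flag
 * soft-interrupt work such as carrier changes and transmit completion.
 */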
int
sabtty_intr(struct sabtty_softc *sc, int *needsoftp)
{
	uint8_t isr0, isr1;
	int i, len = 0, needsoft = 0, r = 0, clearfifo = 0;

	isr0 = SAB_READ(sc, SAB_ISR0);
	isr1 = SAB_READ(sc, SAB_ISR1);

	if (isr0 || isr1)
		r = 1;

	if (isr0 & SAB_ISR0_RPF) {
		len = 32;
		clearfifo = 1;
	}
	if (isr0 & SAB_ISR0_TCD) {
		len = (32 - 1) & SAB_READ(sc, SAB_RBCL);
		clearfifo = 1;
	}
	if (isr0 & SAB_ISR0_TIME) {
		sabtty_cec_wait(sc);
		SAB_WRITE(sc, SAB_CMDR, SAB_CMDR_RFRD);
	}
	if (isr0 & SAB_ISR0_RFO) {
		sc->sc_flags |= SABTTYF_RINGOVERFLOW;
		clearfifo = 1;
	}
	if (len != 0) {
		uint8_t *ptr, b;

		ptr = sc->sc_rput;
		for (i = 0; i < len; i++) {
			b = SAB_READ(sc, SAB_RFIFO);
			if (i % 2 == 0) /* skip status byte */
				cn_check_magic(sc->sc_tty->t_dev,
					       b, sabtty_cnm_state);
			*ptr++ = b;
			if (ptr == sc->sc_rend)
				ptr = sc->sc_rbuf;
			if (ptr == sc->sc_rget) {
				if (ptr == sc->sc_rbuf)
					ptr = sc->sc_rend;
				ptr--;
				sc->sc_flags |= SABTTYF_RINGOVERFLOW;
			}
		}
		sc->sc_rput = ptr;
		needsoft = 1;
	}

	if (clearfifo) {
		sabtty_cec_wait(sc);
		SAB_WRITE(sc, SAB_CMDR, SAB_CMDR_RMC);
	}

	if (isr0 & SAB_ISR0_CDSC) {
		sc->sc_flags |= SABTTYF_CDCHG;
		needsoft = 1;
	}

	if (isr1 & SAB_ISR1_BRKT)
		cn_check_magic(sc->sc_tty->t_dev,
			       CNC_BREAK, sabtty_cnm_state);

	if (isr1 & (SAB_ISR1_XPR | SAB_ISR1_ALLS)) {
		if ((SAB_READ(sc, SAB_STAR) & SAB_STAR_XFW) &&
		    (sc->sc_flags & SABTTYF_STOP) == 0) {
			if (sc->sc_txc < 32)
				len = sc->sc_txc;
			else
				len = 32;

			if (len > 0) {
				SAB_WRITE_BLOCK(sc, SAB_XFIFO, sc->sc_txp, len);
				sc->sc_txp += len;
				sc->sc_txc -= len;

				sabtty_cec_wait(sc);
				SAB_WRITE(sc, SAB_CMDR, SAB_CMDR_XF);

				/*
				 * Prevent the false end of xmit from
				 * confusing things below.
				 */
				isr1 &= ~SAB_ISR1_ALLS;
			}
		}

		if ((sc->sc_txc == 0) || (sc->sc_flags & SABTTYF_STOP)) {
			if ((sc->sc_imr1 & SAB_IMR1_XPR) == 0) {
				sc->sc_imr1 |= SAB_IMR1_XPR;
				sc->sc_imr1 &= ~SAB_IMR1_ALLS;
				SAB_WRITE(sc, SAB_IMR1, sc->sc_imr1);
			}
		}
	}

	if ((isr1 & SAB_ISR1_ALLS) && ((sc->sc_txc == 0) ||
	    (sc->sc_flags & SABTTYF_STOP))) {
		if (sc->sc_flags & SABTTYF_TXDRAIN)
			wakeup(sc);
		sc->sc_flags &= ~SABTTYF_STOP;
		sc->sc_flags |= SABTTYF_DONE;
		sc->sc_imr1 |= SAB_IMR1_ALLS;
		SAB_WRITE(sc, SAB_IMR1, sc->sc_imr1);
		needsoft = 1;
	}

	if (needsoft)
		*needsoftp = needsoft;
	return (r);
}
Example #12
/*
 * do an ioctl operation on a pfsnode (vp).
 * (vp) is not locked on entry or exit.
 */
static int
procfs_ioctl(struct vop_ioctl_args *ap)
{
	struct pfsnode *pfs = VTOPFS(ap->a_vp);
	struct proc *procp;
	struct proc *p;
	int error;
	int signo;
	struct procfs_status *psp;
	unsigned char flags;

	procp = pfind(pfs->pfs_pid);
	if (procp == NULL)
		return ENOTTY;
	p = curproc;
	if (p == NULL) {
		error = EINVAL;
		goto done;
	}

	/* Can't trace a process that's currently exec'ing. */ 
	if ((procp->p_flag & P_INEXEC) != 0) {
		error = EAGAIN;
		goto done;
	}
	if (!CHECKIO(p, procp) || p_trespass(ap->a_cred, procp->p_ucred)) {
		error = EPERM;
		goto done;
	}

	switch (ap->a_command) {
	case PIOCBIS:
	  procp->p_stops |= *(unsigned int*)ap->a_data;
	  break;
	case PIOCBIC:
	  procp->p_stops &= ~*(unsigned int*)ap->a_data;
	  break;
	case PIOCSFL:
	  /*
	   * NFLAGS is "non-suser_xxx flags" -- currently, only
	   * PFS_ISUGID ("ignore set u/g id");
	   */
#define NFLAGS	(PF_ISUGID)
	  flags = (unsigned char)*(unsigned int*)ap->a_data;
	  if (flags & NFLAGS && (error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)))
	    goto done;
	  procp->p_pfsflags = flags;
	  break;
	case PIOCGFL:
	  *(unsigned int*)ap->a_data = (unsigned int)procp->p_pfsflags;
	  break;
	case PIOCSTATUS:
	  /*
	   * NOTE: syscall entry deals with stopevents and may run without
	   *	   the MP lock.
	   */
	  psp = (struct procfs_status *)ap->a_data;
	  psp->flags = procp->p_pfsflags;
	  psp->events = procp->p_stops;
	  spin_lock(&procp->p_spin);
	  if (procp->p_step) {
	    psp->state = 0;
	    psp->why = procp->p_stype;
	    psp->val = procp->p_xstat;
	    spin_unlock(&procp->p_spin);
	  } else {
	    psp->state = 1;
	    spin_unlock(&procp->p_spin);
	    psp->why = 0;	/* Not defined values */
	    psp->val = 0;	/* Not defined values */
	  }
	  break;
	case PIOCWAIT:
	  /*
	   * NOTE: syscall entry deals with stopevents and may run without
	   *	   the MP lock.
	   */
	  psp = (struct procfs_status *)ap->a_data;
	  spin_lock(&procp->p_spin);
	  while (procp->p_step == 0) {
	    tsleep_interlock(&procp->p_stype, PCATCH);
	    spin_unlock(&procp->p_spin);
	    error = tsleep(&procp->p_stype, PCATCH | PINTERLOCKED, "piocwait", 0);
	    if (error)
	      goto done;
	    spin_lock(&procp->p_spin);
	  }
	  spin_unlock(&procp->p_spin);
	  psp->state = 1;	/* It stopped */
	  psp->flags = procp->p_pfsflags;
	  psp->events = procp->p_stops;
	  psp->why = procp->p_stype;	/* why it stopped */
	  psp->val = procp->p_xstat;	/* any extra info */
	  break;
	case PIOCCONT:	/* Restart a proc */
	  /*
	   * NOTE: syscall entry deals with stopevents and may run without
	   *	   the MP lock.  However, the caller is presumably interlocked
	   *	   by having waited.
	   */
	  if (procp->p_step == 0) {
	    error = EINVAL;	/* Can only start a stopped process */
	    goto done;
	  }
	  if ((signo = *(int*)ap->a_data) != 0) {
	    if (signo >= NSIG || signo <= 0) {
	      error = EINVAL;
	      goto done;
	    }
	    ksignal(procp, signo);
	  }
	  procp->p_step = 0;
	  wakeup(&procp->p_step);
	  break;
	default:
	  error = ENOTTY;
	  goto done;
	}
	error = 0;
done:
	PRELE(procp);
	return error;
}
Example #13
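// Console interrupt handler (an extended xv6 variant): read characters,
// support arrow-key line editing with a mid-line caret and a command
// history, and wake readers when a full line is available.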
void consoleintr(int (*getc)(void)) {
	int c, doprocdump = 0;
	// int historyInd = 1; // when the array isn't full, this should hold the last command written; when full, it should specifically be 15
	acquire(&cons.lock);
	int tmp;
	while ((c = getc()) >= 0) {
		switch (c) {
		case C('P'):  // Process listing.
			doprocdump = 1;  // procdump() locks cons.lock indirectly; invoke later
			break;
		case C('U'):  // Kill line.
			kill_line();
			break;
		case C('H'):
		case '\x7f':  // Backspace
			if (input.e != input.w) { //regular caret
				input.e--;
				input.f--;
				if (input.e < input.f) {  // middle caret
					midflag = input.f - input.e;
					int i = input.e;
					while (i < input.f) {
						input.buf[i % INPUT_BUF] =
								input.buf[(i + 1) % INPUT_BUF];
						i++;
					}
				}
				consputc(BACKSPACE);
				midflag = 0;
			}
			break;
		case KEY_LEFT: // Left arrow
			if (input.e != input.w) {
				input.e--;
				consputc(KEY_LEFT);
			}
			break;
		case KEY_RIGHT: // Right arrow
			if (input.f > input.e) {
				input.e++;
				consputc(KEY_RIGHT);
			}
			break;
		case KEY_UP: // Key Up
			if (historyInd != 0)
				kill_line();
			if (historyList.size > 0) {
				char buffer[INPUT_BUF];
				tmp = (historyInd - 1) % historyList.size;
				if (tmp > -1) {
					historyInd = tmp;
					if (history(buffer, historyInd) == 0) {
						setHistory(buffer);
					} else
						panic("history");
				}
			}
			break;
		case KEY_DOWN: // Key Down
			kill_line();
			if (historyInd < historyList.size - 1) {
				char buffer[INPUT_BUF];
				historyInd = (historyInd + 1) % MAX_HISTORY;
				if (historyInd < historyList.size) {
					if (history(buffer, historyInd) == 0) {
						setHistory(buffer);
					} else
						panic("history");
				} else
					kill_line();
			} else if (historyInd == historyList.size - 1)
				kill_line();
			break;
		default:
			if (c != 0 && input.f - input.r < INPUT_BUF) {
				c = (c == '\r') ? '\n' : c;
				if (c == '\n') {
					// if any command is currently written, first record it in the history books
					if (input.f >= input.w) {
						updateHistory();
					}
				}
				//*** regular caret ***
				if (input.e >= input.f) {
					input.buf[input.e++ % INPUT_BUF] = c;
					consputc(c);
				}
				//*** middle caret ***
				else {
					if (c == '\n') {
						input.buf[input.f % INPUT_BUF] = c;
						input.e = input.f + 1;
						consputc(c);
					} else {
						int index = input.f;
						while (index > input.e) { // first shift by one each char in buffer from caret to the end
							input.buf[index % INPUT_BUF] = input.buf[(index - 1)
																	 % INPUT_BUF];
							index--;
						}
						input.buf[input.e % INPUT_BUF] = c; // Write new char in buffer
						int i = input.e;
						index = input.f + 1;
						while (i < index) // Print those chars from buffer to console
							consputc(input.buf[i++ % INPUT_BUF]);
						i = input.e;
						index = input.f;
						while (i < index) { // move caret back to its place after the new character
							consputc(KEY_LEFT);
							i++;
						}
						input.e++;
					}
				}
				input.f++;
				if (c == '\n' || c == C('D') || input.f == input.r + INPUT_BUF) {
					input.w = input.f;
					input.e = input.f;
					wakeup(&input.r);
				}
			}
			break;
		}
	}
	release(&cons.lock);
	if (doprocdump) {
		procdump();  // now call procdump() wo. cons.lock held
	}
}
Example #14
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/*
	 * XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
	 */
	if ((int)id < 0)
		return (EINVAL);

	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		numdquot++;
		DQH_UNLOCK();
		dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/*
	 * Read the requested quota record from the quota file, performing
	 * any necessary conversions.
	 */
	if (ump->um_qflags[type] & QTF_64BIT) {
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		recsize = sizeof(struct dqblk32);
		base = 0;
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == recsize && error == 0) {
		bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
	} else {
		if (ump->um_qflags[type] & QTF_64BIT)
			dqb64_dq((struct dqblk64 *)buf, dq);
		else
			dqb32_dq((struct dqblk32 *)buf, dq);
	}
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}
Example #15
/*
 * Put a queue on the active list.  This will schedule it for writing.
 */
static void
ald_activate(struct alq *alq)
{
	LIST_INSERT_HEAD(&ald_active, alq, aq_act);
	wakeup(&ald_active);
}
Example #16
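// Trap dispatcher (an xv6 variant): route system calls and device
// interrupts, and grow the user stack one page at a time on page faults
// that land just below the current stack.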
void
trap(struct trapframe *tf)
{
  if(tf->trapno == T_SYSCALL){
    if(proc->killed)
      exit();
    proc->tf = tf;
    syscall();
    if (proc->tf->trapno != T_PGFLT) {
      if(proc->killed)
        exit();
      return;
    }
  }

  switch(tf->trapno){
  case T_IRQ0 + IRQ_TIMER:
    if(cpu->id == 0){
      acquire(&tickslock);
      ticks++;
      wakeup(&ticks);
      release(&tickslock);
    }
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE:
    ideintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE+1:
    // Bochs generates spurious IDE1 interrupts.
    break;
  case T_IRQ0 + IRQ_KBD:
    kbdintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_COM1:
    uartintr();
    lapiceoi();
    break;
  case T_IRQ0 + 7:
  case T_IRQ0 + IRQ_SPURIOUS:
    cprintf("cpu%d: spurious interrupt at %x:%x\n",
            cpu->id, tf->cs, tf->eip);
    lapiceoi();
    break;
  case T_PGFLT:
    // Grow the user stack on demand: the faulting address must lie in
    // the page just below the current stack, outside the guard page and
    // above the heap, and the stack grows only one page at a time.
    if ((PGROUNDUP(proc->sz) + PGSIZE < USERTOP - proc->ssz) &&
        (rcr2() >= USERTOP - proc->ssz - PGSIZE) &&
        (rcr2() >= proc->sz + PGSIZE)) {
      uint pgpos = proc->ssz + PGSIZE;
      if (allocuvm(proc->pgdir, USERTOP - pgpos, USERTOP - proc->ssz) == 0) {
        cprintf("allocuvm failed cr2=0x%x\n", rcr2());
        proc->killed = 1;
        break;
      }
      proc->ssz = pgpos;
      break;
    }
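    // Conditions for legal stack growth not met: fall through to the
    // default case and treat this as an ordinary fault.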
  default:
    if(proc == 0 || (tf->cs&3) == 0){
      // In kernel, it must be our mistake.
      cprintf("unexpected trap %d from cpu %d eip %x (cr2=0x%x)\n",
              tf->trapno, cpu->id, tf->eip, rcr2());
      panic("trap");
    }
    // In user space, assume process misbehaved.
    cprintf("pid %d %s: trap %d err %d on cpu %d "
            "eip 0x%x addr 0x%x--kill proc\n",
            proc->pid, proc->name, tf->trapno, tf->err, cpu->id, tf->eip, 
            rcr2());
    proc->killed = 1;
  }

  // Force process exit if it has been killed and is in user space.
  // (If it is still executing in the kernel, let it keep running 
  // until it gets to the regular system call return.)
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();

  // Force process to give up CPU on clock tick.
  // If interrupts were on while locks held, would need to check nlock.
  if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
    yield();

  // Check if the process has been killed since we yielded
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();
}
Example #17
/*
 * Copy a new entry into the queue.  If the operation would block either
 * wait or return an error depending on the value of waitok.
 */
int
alq_writen(struct alq *alq, void *data, int len, int flags)
{
	int activate, copy, ret;
	void *waitchan;

	KASSERT((len > 0 && len <= alq->aq_buflen),
	    ("%s: len <= 0 || len > aq_buflen", __func__));

	activate = ret = 0;
	copy = len;
	waitchan = NULL;

	ALQ_LOCK(alq);

	/*
	 * Fail to perform the write and return EWOULDBLOCK if:
	 * - The message is larger than our underlying buffer.
	 * - The ALQ is being shutdown.
	 * - There is insufficient free space in our underlying buffer
	 *   to accept the message and the user can't wait for space.
	 * - There is insufficient free space in our underlying buffer
	 *   to accept the message and the alq is inactive due to prior
	 *   use of the ALQ_NOACTIVATE flag (which would lead to deadlock).
	 */
	if (len > alq->aq_buflen ||
	    alq->aq_flags & AQ_SHUTDOWN ||
	    (((flags & ALQ_NOWAIT) || (!(alq->aq_flags & AQ_ACTIVE) &&
	    HAS_PENDING_DATA(alq))) && alq->aq_freebytes < len)) {
		ALQ_UNLOCK(alq);
		return (EWOULDBLOCK);
	}

	/*
	 * If we want ordered writes and there is already at least one thread
	 * waiting for resources to become available, sleep until we're woken.
	 */
	if (alq->aq_flags & AQ_ORDERED && alq->aq_waiters > 0) {
		KASSERT(!(flags & ALQ_NOWAIT),
		    ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
		alq->aq_waiters++;
		msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqwnord", 0);
		alq->aq_waiters--;
	}

	/*
	 * (ALQ_WAITOK && aq_freebytes < len) or aq_freebytes >= len, either
	 * enter while loop and sleep until we have enough free bytes (former)
	 * or skip (latter). If AQ_ORDERED is set, only 1 thread at a time will
	 * be in this loop. Otherwise, multiple threads may be sleeping here
	 * competing for ALQ resources.
	 */
	while (alq->aq_freebytes < len && !(alq->aq_flags & AQ_SHUTDOWN)) {
		KASSERT(!(flags & ALQ_NOWAIT),
		    ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
		alq->aq_flags |= AQ_WANTED;
		alq->aq_waiters++;
		if (waitchan)
			wakeup(waitchan);
		msleep_spin(alq, &alq->aq_mtx, "alqwnres", 0);
		alq->aq_waiters--;

		/*
		 * If we're the first thread to wake after an AQ_WANTED wakeup
		 * but there isn't enough free space for us, we're going to loop
		 * and sleep again. If there are other threads waiting in this
		 * loop, schedule a wakeup so that they can see if the space
		 * they require is available.
		 */
		if (alq->aq_waiters > 0 && !(alq->aq_flags & AQ_ORDERED) &&
		    alq->aq_freebytes < len && !(alq->aq_flags & AQ_WANTED))
			waitchan = alq;
		else
			waitchan = NULL;
	}

	/*
	 * If there are waiters, we need to signal the waiting threads after we
	 * complete our work. The alq ptr is used as a wait channel for threads
	 * requiring resources to be freed up. In the AQ_ORDERED case, threads
	 * are not allowed to concurrently compete for resources in the above
	 * while loop, so we use a different wait channel in this case.
	 */
	if (alq->aq_waiters > 0) {
		if (alq->aq_flags & AQ_ORDERED)
			waitchan = &alq->aq_waiters;
		else
			waitchan = alq;
	} else
		waitchan = NULL;

	/* Bail if we're shutting down. */
	if (alq->aq_flags & AQ_SHUTDOWN) {
		ret = EWOULDBLOCK;
		goto unlock;
	}

	/*
	 * If we need to wrap the buffer to accommodate the write,
	 * we'll need 2 calls to bcopy.
	 */
	if ((alq->aq_buflen - alq->aq_writehead) < len)
		copy = alq->aq_buflen - alq->aq_writehead;

	/* Copy message (or part thereof if wrap required) to the buffer. */
	bcopy(data, alq->aq_entbuf + alq->aq_writehead, copy);
	alq->aq_writehead += copy;

	if (alq->aq_writehead >= alq->aq_buflen) {
		KASSERT((alq->aq_writehead == alq->aq_buflen),
		    ("%s: alq->aq_writehead (%d) > alq->aq_buflen (%d)",
		    __func__,
		    alq->aq_writehead,
		    alq->aq_buflen));
		alq->aq_writehead = 0;
	}

	if (copy != len) {
		/*
		 * Wrap the buffer by copying the remainder of our message
		 * to the start of the buffer and resetting aq_writehead.
		 */
		bcopy(((uint8_t *)data)+copy, alq->aq_entbuf, len - copy);
		alq->aq_writehead = len - copy;
	}

	KASSERT((alq->aq_writehead >= 0 && alq->aq_writehead < alq->aq_buflen),
	    ("%s: aq_writehead < 0 || aq_writehead >= aq_buflen", __func__));

	alq->aq_freebytes -= len;

	if (!(alq->aq_flags & AQ_ACTIVE) && !(flags & ALQ_NOACTIVATE)) {
		alq->aq_flags |= AQ_ACTIVE;
		activate = 1;
	}

	KASSERT((HAS_PENDING_DATA(alq)), ("%s: queue empty!", __func__));

unlock:
	ALQ_UNLOCK(alq);

	if (activate) {
		ALD_LOCK();
		ald_activate(alq);
		ALD_UNLOCK();
	}

	/* NB: We rely on wakeup_one waking threads in a FIFO manner. */
	if (waitchan != NULL)
		wakeup_one(waitchan);

	return (ret);
}
Example #18
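/*
 * Open routine for a USB serial (ucom) device. On first open: set up
 * termios defaults, assert DTR/RTS, open the bulk-in/out pipes and
 * allocate transfer buffers before starting the read loop; later opens
 * just layer the tty open on top.
 */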
Static int
ucomopen(dev_t dev, int flag, int mode, usb_proc_ptr p)
{
	int unit = UCOMUNIT(dev);
	struct ucom_softc *sc;
	usbd_status err;
	struct tty *tp;
	int s;
	int error;

	USB_GET_SC_OPEN(ucom, unit, sc);

	if (sc->sc_dying)
		return (ENXIO);

	tp = sc->sc_tty;

	DPRINTF(("%s: ucomopen: tp = %p\n", USBDEVNAME(sc->sc_dev), tp));

	if (ISSET(tp->t_state, TS_ISOPEN) &&
	    ISSET(tp->t_state, TS_XCLUDE) &&
	    suser(p))
		return (EBUSY);

	/*
	 * Do the following iff this is a first open.
	 */
	s = spltty();
	while (sc->sc_opening)
		tsleep(&sc->sc_opening, PRIBIO, "ucomop", 0);
	sc->sc_opening = 1;

	if (!ISSET(tp->t_state, TS_ISOPEN)) {
		struct termios t;

		sc->sc_poll = 0;
		sc->sc_lsr = sc->sc_msr = sc->sc_mcr = 0;

		tp->t_dev = dev;

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		t.c_ispeed = 0;
		t.c_ospeed = TTYDEF_SPEED;
		t.c_cflag = TTYDEF_CFLAG;
		/* Make sure ucomparam() will do something. */
		tp->t_ospeed = 0;
		(void)ucomparam(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/*
		 * Turn on DTR.  We must always do this, even if carrier is not
		 * present, because otherwise we'd have to use TIOCSDTR
		 * immediately after setting CLOCAL, which applications do not
		 * expect.  We always assert DTR while the device is open
		 * unless explicitly requested to deassert it.
		 */
		(void)ucomctl(sc, TIOCM_DTR | TIOCM_RTS, DMBIS);

		/* Device specific open */
		if (sc->sc_callback->ucom_open != NULL) {
			error = sc->sc_callback->ucom_open(sc->sc_parent,
							   sc->sc_portno);
			if (error) {
				ucom_cleanup(sc);
				sc->sc_opening = 0;
				wakeup(&sc->sc_opening);
				splx(s);
				return (error);
			}
		}

		DPRINTF(("ucomopen: open pipes in = %d out = %d\n",
			 sc->sc_bulkin_no, sc->sc_bulkout_no));

		/* Open the bulk pipes */
		/* Bulk-in pipe */
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkin_no, 0,
				     &sc->sc_bulkin_pipe);
		if (err) {
			printf("%s: open bulk out error (addr %d): %s\n",
			       USBDEVNAME(sc->sc_dev), sc->sc_bulkin_no, 
			       usbd_errstr(err));
			error = EIO;
			goto fail_0;
		}
		/* Bulk-out pipe */
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkout_no,
				     USBD_EXCLUSIVE_USE, &sc->sc_bulkout_pipe);
		if (err) {
			printf("%s: open bulk in error (addr %d): %s\n",
			       USBDEVNAME(sc->sc_dev), sc->sc_bulkout_no,
			       usbd_errstr(err));
			error = EIO;
			goto fail_1;
		}

		/* Allocate a request and an input buffer and start reading. */
		sc->sc_ixfer = usbd_alloc_xfer(sc->sc_udev);
		if (sc->sc_ixfer == NULL) {
			error = ENOMEM;
			goto fail_2;
		}

		sc->sc_ibuf = usbd_alloc_buffer(sc->sc_ixfer,
						sc->sc_ibufsizepad);
		if (sc->sc_ibuf == NULL) {
			error = ENOMEM;
			goto fail_3;
		}

		sc->sc_oxfer = usbd_alloc_xfer(sc->sc_udev);
		if (sc->sc_oxfer == NULL) {
			error = ENOMEM;
			goto fail_3;
		}

		sc->sc_obuf = usbd_alloc_buffer(sc->sc_oxfer,
						sc->sc_obufsize +
						sc->sc_opkthdrlen);
		if (sc->sc_obuf == NULL) {
			error = ENOMEM;
			goto fail_4;
		}

		/*
		 * Handle initial DCD.
		 */
		if (ISSET(sc->sc_msr, UMSR_DCD) ||
		    (minor(dev) & UCOM_CALLOUT_MASK))
			(*linesw[tp->t_line].l_modem)(tp, 1);

		ucomstartread(sc);
	}

	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);

	error = ttyopen(dev, tp);
	if (error)
		goto bad;

	error = (*linesw[tp->t_line].l_open)(dev, tp);
	if (error)
		goto bad;

	disc_optim(tp, &tp->t_termios, sc);

	DPRINTF(("%s: ucomopen: success\n", USBDEVNAME(sc->sc_dev)));

	sc->sc_poll = 1;
	sc->sc_refcnt++;

	return (0);

fail_4:
	usbd_free_xfer(sc->sc_oxfer);
	sc->sc_oxfer = NULL;
fail_3:
	usbd_free_xfer(sc->sc_ixfer);
	sc->sc_ixfer = NULL;
fail_2:
	usbd_close_pipe(sc->sc_bulkout_pipe);
	sc->sc_bulkout_pipe = NULL;
fail_1:
	usbd_close_pipe(sc->sc_bulkin_pipe);
	sc->sc_bulkin_pipe = NULL;
fail_0:
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);
	return (error);

bad:
	if (!ISSET(tp->t_state, TS_ISOPEN)) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		ucom_cleanup(sc);
	}

	DPRINTF(("%s: ucomopen: failed\n", USBDEVNAME(sc->sc_dev)));

	return (error);
}
Example #19
/*
 * Exit: deallocate address space and other resources, change proc state to
 * zombie, and unlink proc from allproc and parent's lists.  Save exit status
 * and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rv)
{
	struct proc *p, *nq, *q;
	struct vnode *ttyvp = NULL;

	mtx_assert(&Giant, MA_NOTOWNED);

	p = td->td_proc;
	/*
	 * XXX in case we're rebooting we just let init die in order to
	 * work around an unsolved stack overflow seen very late during
	 * shutdown on sparc64 when the gmirror worker process exists.
	 */
	if (p == initproc && rebooting == 0) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	while (p->p_flag & P_HADTHREADS) {
		/*
		 * First check if some other thread got here before us.
		 * If so, act appropriately: exit or suspend.
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads. This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If there is already a single-threading request after
		 * resumption, calling thread_single() will fail; in that
		 * case, we just re-check all suspension requests, and the
		 * thread should either be suspended there or exit.
		 */
		if (!thread_single(SINGLE_EXIT))
			break;

		/*
		 * All other activity in this process is now stopped.
		 * Threading support has been turned off.
		 */
	}
	KASSERT(p->p_numthreads == 1,
	    ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
	racct_sub(p, RACCT_NTHR, 1);
	/*
	 * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
	 * on our vmspace, so we should block below until they have
	 * released their reference to us.  Note that if they have
	 * requested S_EXIT stops we will block here until they ack
	 * via PIOCCONT.
	 */
	_STOPEVENT(p, S_EXIT, rv);

	/*
	 * Ignore any pending request to stop due to a stop signal.
	 * Once P_WEXIT is set, future requests will be ignored as
	 * well.
	 */
	p->p_flag &= ~P_STOPPED_SIG;
	KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped"));

	/*
	 * Note that we are exiting and do another wakeup of anyone in
	 * PIOCWAIT in case they aren't listening for S_EXIT stops or
	 * decided to wait again after we told them we are exiting.
	 */
	p->p_flag |= P_WEXIT;
	wakeup(&p->p_stype);

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);

	p->p_xstat = rv;	/* Let event handler change exit status */
	PROC_UNLOCK(p);
	/* Drain the limit callout while we don't have the proc locked */
	callout_drain(&p->p_limco);

#ifdef AUDIT
	/*
	 * The Sun BSM exit token contains two components: an exit status as
	 * passed to exit(), and a return value to indicate what sort of exit
	 * it was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	/* Are we a task leader? */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			kern_psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	rv = p->p_xstat;	/* Event handler could change exit status */
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT | P_PPTRACE);

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		timevalclear(&p->p_realtimer.it_interval);
		msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
		KASSERT(!timevalisset(&p->p_realtimer.it_value),
		    ("realtime timer is still armed"));
	}
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * If this process has an nlminfo data area (for lockd), release it
	 */
	if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
		(*nlminfo_release_p)(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdescfree(td);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	vmspace_exit(td);

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;
		struct tty *tp;

		/*
		 * s_ttyp is not zero'd; we use this to indicate that
		 * the session once had a controlling terminal. (for
		 * logging and informational purposes)
		 */
		SESS_LOCK(sp);
		ttyvp = sp->s_ttyvp;
		tp = sp->s_ttyp;
		sp->s_ttyvp = NULL;
		sp->s_ttydp = NULL;
		sp->s_leader = NULL;
		SESS_UNLOCK(sp);

		/*
		 * Signal foreground pgrp and revoke access to
		 * controlling terminal if it has not been revoked
		 * already.
		 *
		 * Because the TTY may have been revoked in the mean
		 * time and could already have a new session associated
		 * with it, make sure we don't send a SIGHUP to a
		 * foreground process group that does not belong to this
		 * session.
		 */

		if (tp != NULL) {
			tty_lock(tp);
			if (tp->t_session == sp)
				tty_signal_pgrp(tp, SIGHUP);
			tty_unlock(tp);
		}

		if (ttyvp != NULL) {
			sx_xunlock(&proctree_lock);
			if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
				VOP_REVOKE(ttyvp, REVOKEALL);
				VOP_UNLOCK(ttyvp, 0);
			}
			sx_xlock(&proctree_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);

	/* Release the TTY now we've unlocked everything. */
	if (ttyvp != NULL)
		vrele(ttyvp);
#ifdef KTRACE
	ktrprocexit(td);
#endif
	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp != NULL) {
		vrele(p->p_textvp);
		p->p_textvp = NULL;
	}

	/*
	 * Release our limits structure.
	 */
	lim_free(p->p_limit);
	p->p_limit = NULL;

	tidhash_remove(td);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Call machine-dependent code to release any
	 * machine-dependent resources other than the address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);

	/*
	 * Reparent all of our children to init.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			struct thread *temp;

			/*
			 * Since q was found on our children list, the
			 * proc_reparent() call moved q to the orphan
			 * list due to present P_TRACED flag. Clear
			 * orphan link for q now while q is locked.
			 */
			clear_orphan(q);
			q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
			FOREACH_THREAD_IN_PROC(q, temp)
				temp->td_dbgflags &= ~TDB_SUSPEND;
			kern_psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Also get rid of our orphans.
	 */
	while ((q = LIST_FIRST(&p->p_orphans)) != NULL) {
		PROC_LOCK(q);
		clear_orphan(q);
		PROC_UNLOCK(q);
	}

	/* Save exit status. */
	PROC_LOCK(p);
	p->p_xthread = td;

	/* Tell the prison that we are gone. */
	prison_proc_free(p->p_ucred->cr_prison);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exit if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exit)
		dtrace_fasttrap_exit(p);
#endif

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);

#ifdef KDTRACE_HOOKS
	int reason = CLD_EXITED;
	if (WCOREDUMP(rv))
		reason = CLD_DUMPED;
	else if (WIFSIGNALED(rv))
		reason = CLD_KILLED;
	SDT_PROBE(proc, kernel, , exit, reason, 0, 0, 0, 0);
#endif

	/*
	 * Just delete all entries in the p_klist. At this point we won't
	 * report any more events, and there are nasty race conditions that
	 * can beat us if we don't.
	 */
	knlist_clear(&p->p_klist, 1);

	/*
	 * If this is a process with a descriptor, we may not need to deliver
	 * a signal to the parent.  proctree_lock is held over
	 * procdesc_exit() to serialize concurrent calls to close() and
	 * exit().
	 */
	if (p->p_procdesc == NULL || procdesc_exit(p)) {
		/*
		 * Notify parent that we're gone.  If parent has the
		 * PS_NOCLDWAIT flag set, or if the handler is set to SIG_IGN,
		 * notify process 1 instead (and hope it will handle this
		 * situation).
		 */
		PROC_LOCK(p->p_pptr);
		mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
		if (p->p_pptr->p_sigacts->ps_flag &
		    (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
			struct proc *pp;

			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
			pp = p->p_pptr;
			PROC_UNLOCK(pp);
			proc_reparent(p, initproc);
			p->p_sigparent = SIGCHLD;
			PROC_LOCK(p->p_pptr);

			/*
			 * Notify parent, so in case he was wait(2)ing or
			 * executing waitpid(2) with our pid, he will
			 * continue.
			 */
			wakeup(pp);
		} else
			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

		if (p->p_pptr == initproc)
			kern_psignal(p->p_pptr, SIGCHLD);
		else if (p->p_sigparent != 0) {
			if (p->p_sigparent == SIGCHLD)
				childproc_exited(p);
			else	/* LINUX thread */
				kern_psignal(p->p_pptr, p->p_sigparent);
		}
	} else
		PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);

	/*
	 * The state PRS_ZOMBIE prevents other processes from sending
	 * signals to the process.  To avoid a memory leak, we free the
	 * signal queue memory at the time the state is set.
	 */
	sigqueue_flush(&p->p_sigqueue);
	sigqueue_flush(&td->td_sigqueue);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	cv_broadcast(&p->p_pwait);
	sched_exit(p->p_pptr, td);
	PROC_SLOCK(p);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	/*
	 * Hopefully no one will try to deliver a signal to the process this
	 * late in the game.
	 */
	knlist_destroy(&p->p_klist);

	/*
	 * Save our children's rusage information in our exit rusage.
	 */
	ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}
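
The ordering spelled out in the comment above (wake the parent first, then mark PRS_ZOMBIE before releasing the parent's proc lock) is the classic defense against a lost wakeup. A minimal userland sketch of the same discipline, assuming POSIX threads rather than the kernel's sleep queues (illustrative, not FreeBSD code):

#include <pthread.h>

static pthread_mutex_t parent_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  child_exited = PTHREAD_COND_INITIALIZER;
static int child_state;			/* 0 = running, 1 = "zombie" */

void
child_exit_side(void)
{
	pthread_mutex_lock(&parent_lock);
	child_state = 1;		/* publish the state before unlocking */
	pthread_cond_broadcast(&child_exited);
	pthread_mutex_unlock(&parent_lock);
}

void
parent_wait_side(void)
{
	pthread_mutex_lock(&parent_lock);
	while (child_state == 0)	/* re-check: tolerates spurious wakeups */
		pthread_cond_wait(&child_exited, &parent_lock);
	pthread_mutex_unlock(&parent_lock);
}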
Beispiel #20
0
Static void
ucomstart(struct tty *tp)
{
	struct ucom_softc *sc;
	struct cblock *cbp;
	usbd_status err;
	int s;
	u_char *data;
	int cnt;

	USB_GET_SC(ucom, UCOMUNIT(tp->t_dev), sc);
	DPRINTF(("ucomstart: sc = %p\n", sc));

	if (sc->sc_dying)
		return;

	s = spltty();

	if (tp->t_state & TS_TBLOCK) {
		if (ISSET(sc->sc_mcr, UMCR_RTS) &&
		    ISSET(sc->sc_state, UCS_RTS_IFLOW)) {
			DPRINTF(("ucomstart: clear RTS\n"));
			(void)ucomctl(sc, UMCR_RTS, DMBIC);
		}
	} else {
		if (!ISSET(sc->sc_mcr, UMCR_RTS) &&
		    tp->t_rawq.c_cc <= tp->t_ilowat &&
		    ISSET(sc->sc_state, UCS_RTS_IFLOW)) {
			DPRINTF(("ucomstart: set RTS\n"));
			(void)ucomctl(sc, UMCR_RTS, DMBIS);
		}
	}

	if (ISSET(tp->t_state, TS_BUSY | TS_TIMEOUT | TS_TTSTOP)) {
		ttwwakeup(tp);
		DPRINTF(("ucomstart: stopped\n"));
		goto out;
	}

	if (tp->t_outq.c_cc <= tp->t_olowat) {
		if (ISSET(tp->t_state, TS_SO_OLOWAT)) {
			CLR(tp->t_state, TS_SO_OLOWAT);
			wakeup(TSA_OLOWAT(tp));
		}
		selwakeup(&tp->t_wsel);
		if (tp->t_outq.c_cc == 0) {
			if (ISSET(tp->t_state, TS_BUSY | TS_SO_OCOMPLETE) ==
			    TS_SO_OCOMPLETE && tp->t_outq.c_cc == 0) {
				CLR(tp->t_state, TS_SO_OCOMPLETE);
				wakeup(TSA_OCOMPLETE(tp));
			}
			goto out;
		}
	}

	/* Grab the first contiguous region of buffer space. */
	data = tp->t_outq.c_cf;
	cbp = (struct cblock *) ((intptr_t) tp->t_outq.c_cf & ~CROUND);
	cnt = min((char *) (cbp+1) - tp->t_outq.c_cf, tp->t_outq.c_cc);

	if (cnt == 0) {
		DPRINTF(("ucomstart: cnt == 0\n"));
		goto out;
	}

	SET(tp->t_state, TS_BUSY);

	if (cnt > sc->sc_obufsize) {
		DPRINTF(("ucomstart: big buffer %d chars\n", cnt));
		cnt = sc->sc_obufsize;
	}
	if (sc->sc_callback->ucom_write != NULL)
		sc->sc_callback->ucom_write(sc->sc_parent, sc->sc_portno,
					    sc->sc_obuf, data, &cnt);
	else
		memcpy(sc->sc_obuf, data, cnt);

	DPRINTF(("ucomstart: %d chars\n", cnt));
	usbd_setup_xfer(sc->sc_oxfer, sc->sc_bulkout_pipe, 
			(usbd_private_handle)sc, sc->sc_obuf, cnt,
			USBD_NO_COPY, USBD_NO_TIMEOUT, ucomwritecb);
	/* What can we do on error? */
	err = usbd_transfer(sc->sc_oxfer);
	if (err != USBD_IN_PROGRESS)
		printf("ucomstart: err=%s\n", usbd_errstr(err));

	ttwwakeup(tp);

    out:
	splx(s);
}
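
The transfer queued above completes in ucomwritecb(), which clears TS_BUSY, flushes the transmitted bytes from the output clist, and restarts output. A condensed sketch of that callback from memory, with the error paths trimmed; treat it as illustrative rather than the verbatim driver:

Static void
ucomwritecb(usbd_xfer_handle xfer, usbd_private_handle p, usbd_status status)
{
	struct ucom_softc *sc = (struct ucom_softc *)p;
	struct tty *tp = sc->sc_tty;
	u_int32_t cc;
	int s;

	if (status == USBD_CANCELLED || sc->sc_dying)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &cc, NULL);

	s = spltty();
	CLR(tp->t_state, TS_BUSY);
	ndflush(&tp->t_outq, cc);		/* drop what the device consumed */
	(*linesw[tp->t_line].l_start)(tp);	/* re-enter ucomstart() */
	splx(s);
}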
Beispiel #21
0
//PAGEBREAK: 41
void
trap(struct trapframe *tf)
{
  if(tf->trapno == T_SYSCALL){
    if(proc->killed)
      exit();
    proc->tf = tf;
    syscall();
    if(proc->killed)
      exit();
    return;
  }

  switch(tf->trapno){
  case T_IRQ0 + IRQ_TIMER:
    if(cpu->id == 0){
      acquire(&tickslock);
      ticks++;
      wakeup(&ticks);
      release(&tickslock);
    }
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE:
    ideintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE+1:
    // Bochs generates spurious IDE1 interrupts.
    break;
  case T_IRQ0 + IRQ_KBD:
    kbdintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_COM1:
    uartintr();
    lapiceoi();
    break;
  case T_IRQ0 + 7:
  case T_IRQ0 + IRQ_SPURIOUS:
    cprintf("cpu%d: spurious interrupt at %x:%x\n",
            cpu->id, tf->cs, tf->eip);
    lapiceoi();
    break;
   
  //PAGEBREAK: 13
  default:
    if(proc == 0 || (tf->cs&3) == 0){
      // In kernel, it must be our mistake.
      cprintf("unexpected trap %d from cpu %d eip %x (cr2=0x%x)\n",
              tf->trapno, cpu->id, tf->eip, rcr2());
      panic("trap");
    }

    // In user space, assume process misbehaved.

    if((tf->eip+1) != 0 || (*(int *)(tf->esp-4)+1) != 0 || proc->xstack == 0){
      cprintf("pid %d %s: trap %d err %d on cpu %d "
              "eip 0x%x addr 0x%x--kill proc\n",
              proc->pid, proc->name, tf->trapno, tf->err, cpu->id, tf->eip,
              rcr2());
    }
    else {
      // Catch the exception and get the return value.
      //asm volatile("\t movl %%eax,%0" : "=r"(ret_val));
      //cprintf("trap: return val=0x%x\n",ret_val);
      thread_exit((void *)tf->eax);
    }
    proc->killed = 1;
  }

  // Force process exit if it has been killed and is in user space.
  // (If it is still executing in the kernel, let it keep running 
  // until it gets to the regular system call return.)
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();

  // Force process to give up CPU on clock tick.
  // If interrupts were on while locks held, would need to check nlock.
  if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
    yield();

  // Check if the process has been killed since we yielded
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();
}
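
The wakeup(&ticks) in the timer case pairs with xv6's sys_sleep(), which sleeps on the same channel while holding tickslock. Recalled from the stock sources, so treat it as a sketch:

int
sys_sleep(void)
{
  int n;
  uint ticks0;

  if(argint(0, &n) < 0)
    return -1;
  acquire(&tickslock);
  ticks0 = ticks;
  while(ticks - ticks0 < n){
    if(proc->killed){
      release(&tickslock);
      return -1;
    }
    // sleep() releases tickslock while blocked and reacquires it
    // before returning, so the wakeup(&ticks) above cannot be lost.
    sleep(&ticks, &tickslock);
  }
  release(&tickslock);
  return 0;
}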
Beispiel #22
0
void BodySW::set_state(PhysicsServer::BodyState p_state, const Variant &p_variant) {

	switch (p_state) {
		case PhysicsServer::BODY_STATE_TRANSFORM: {

			if (mode == PhysicsServer::BODY_MODE_KINEMATIC) {
				new_transform = p_variant;
				//wakeup_neighbours();
				set_active(true);
				if (first_time_kinematic) {
					_set_transform(p_variant);
					_set_inv_transform(get_transform().affine_inverse());
					first_time_kinematic = false;
				}

			} else if (mode == PhysicsServer::BODY_MODE_STATIC) {
				_set_transform(p_variant);
				_set_inv_transform(get_transform().affine_inverse());
				wakeup_neighbours();
			} else {
				Transform t = p_variant;
				t.orthonormalize();
				new_transform = get_transform(); //used as old to compute motion
				if (new_transform == t)
					break;
				_set_transform(t);
				_set_inv_transform(get_transform().inverse());
			}
			wakeup();

		} break;
		case PhysicsServer::BODY_STATE_LINEAR_VELOCITY: {

			/*
			if (mode==PhysicsServer::BODY_MODE_STATIC)
				break;
			*/
			linear_velocity = p_variant;
			wakeup();
		} break;
		case PhysicsServer::BODY_STATE_ANGULAR_VELOCITY: {
			/*
			if (mode!=PhysicsServer::BODY_MODE_RIGID)
				break;
			*/
			angular_velocity = p_variant;
			wakeup();

		} break;
		case PhysicsServer::BODY_STATE_SLEEPING: {
			//?
			if (mode == PhysicsServer::BODY_MODE_STATIC || mode == PhysicsServer::BODY_MODE_KINEMATIC)
				break;
			bool do_sleep = p_variant;
			if (do_sleep) {
				linear_velocity = Vector3();
				//biased_linear_velocity=Vector3();
				angular_velocity = Vector3();
				//biased_angular_velocity=Vector3();
				set_active(false);
			} else {
				if (mode != PhysicsServer::BODY_MODE_STATIC)
					set_active(true);
			}
		} break;
		case PhysicsServer::BODY_STATE_CAN_SLEEP: {
			can_sleep = p_variant;
			if (mode == PhysicsServer::BODY_MODE_RIGID && !active && !can_sleep)
				set_active(true);

		} break;
	}
}
Beispiel #23
0
/*---------------------------------------------------------------------------*
 *	isdn_layer2_trace_ind
 *	---------------------
 *	is called from layer 1, adds timestamp to trace data and puts
 *	it into a queue, from which it can be read from the i4btrc
 *	device. The unit number in the trace header selects the minor
 *	device's queue the data is put into.
 *---------------------------------------------------------------------------*/
int
isdn_layer2_trace_ind(struct l2_softc *sc, struct isdn_l3_driver *drv, i4b_trace_hdr *hdr, size_t len, unsigned char *buf)
{
	struct mbuf *m;
	int bri, x;
	int trunc = 0;
	int totlen = len + sizeof(i4b_trace_hdr);

	MICROTIME(hdr->time);
	hdr->bri = sc->drv->bri;

	/*
	 * For telephony (or rather non-HDLC HSCX mode) we get
	 * (MCLBYTES + sizeof(i4b_trace_hdr)) length packets
	 * to put into the queue to userland. Because of this
	 * we detect this situation, strip the length to MCLBYTES
	 * max size, and inform the userland program of this fact
	 * by putting the number of truncated bytes into hdr->trunc.
	 */
	 
	if(totlen > MCLBYTES)
	{
		trunc = 1;
		hdr->trunc = totlen - MCLBYTES;
		totlen = MCLBYTES;
	}
	else
	{
		hdr->trunc = 0;
	}

	/* set length of trace record */
	
	hdr->length = totlen;
	
	/* check valid interface */
	
	if((bri = hdr->bri) >= NISDNTRC)	/* >= : bri indexes trace_queue[] */
	{
		printf("i4b_trace: isdn_layer2_trace_ind - bri >= NISDNTRC!\n");
		return(0);
	}

	/* get mbuf */
	
	if(!(m = i4b_Bgetmbuf(totlen)))
	{
		printf("i4b_trace: get_trace_data_from_l1 - i4b_getmbuf() failed!\n");
		return(0);
	}

	/* check if we are in analyzemode */
	
	if(analyzemode && (bri == rxunit || bri == txunit))
	{
		if(bri == rxunit)
			hdr->dir = FROM_NT;
		else
			hdr->dir = FROM_TE;
		bri = outunit;			
	}

	if(IF_QFULL(&trace_queue[bri]))
	{
		struct mbuf *m1;

		x = splnet();
		IF_DEQUEUE(&trace_queue[bri], m1);
		splx(x);		

		i4b_Bfreembuf(m1);
	}
	
	/* copy trace header */
	memcpy(m->m_data, hdr, sizeof(i4b_trace_hdr));

	/* copy trace data */
	if(trunc)
		memcpy(&m->m_data[sizeof(i4b_trace_hdr)], buf, totlen-sizeof(i4b_trace_hdr));
	else
		memcpy(&m->m_data[sizeof(i4b_trace_hdr)], buf, len);

	x = splnet();
	
	IF_ENQUEUE(&trace_queue[bri], m);
	
	if(device_state[bri] & ST_WAITDATA)
	{
		device_state[bri] &= ~ST_WAITDATA;
		wakeup((caddr_t) &trace_queue[bri]);
	}

	splx(x);
	
	return(1);
}
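
The wakeup on &trace_queue[bri] pairs with the i4btrc read routine, which marks the minor device as waiting and sleeps on the same queue address. A hypothetical sketch of that side (ST_WAITDATA, trace_queue and device_state follow the code above; the function name, wmesg, and structure are illustrative):

static int
i4btrc_read_sketch(int unit, struct uio *uio)
{
	struct mbuf *m;
	int x, error = 0;

	x = splnet();
	while (trace_queue[unit].ifq_len == 0) {
		device_state[unit] |= ST_WAITDATA;
		/* sleep on the channel isdn_layer2_trace_ind() wakes */
		error = tsleep((caddr_t)&trace_queue[unit],
			       PZERO | PCATCH, "bitrc", 0);
		if (error != 0) {
			device_state[unit] &= ~ST_WAITDATA;
			splx(x);
			return (error);
		}
	}
	IF_DEQUEUE(&trace_queue[unit], m);
	splx(x);

	if (m != NULL) {
		error = uiomove(m->m_data, m->m_len, uio);
		i4b_Bfreembuf(m);
	}
	return (error);
}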
Beispiel #24
0
static int
cbb_pci_filt(void *arg)
{
    struct cbb_softc *sc = arg;
    uint32_t sockevent;
    uint8_t csc;
    int retval = FILTER_STRAY;

    /*
     * Some chips also require us to read the old ExCA register for card
     * status change when we route CSC via PCI.  This isn't supposed to be
     * required, but it clears the interrupt state on some chipsets.
     * Maybe there's a setting that would obviate its need.  Maybe we
     * should test the status bits and deal with them, but so far we've
     * not found any machines that don't also give us the socket status
     * indication above.
     *
     * This call used to be unconditional.  However, further research
     * suggests that we hit this condition when the card READY interrupt
     * fired.  So now we only read it for 16-bit cards, and we only claim
     * the interrupt if READY is set.  If this still causes problems, then
     * the next step would be to read this if we have a 16-bit card *OR*
     * we have no card.  We treat the READY signal as if it were the power
     * completion signal.  Some bridges may double signal things here, but
     * signalling twice should be OK since we only sleep on the powerintr
     * in one place and a double wakeup would be benign there.
     */
    if (sc->flags & CBB_16BIT_CARD) {
        csc = exca_getb(&sc->exca[0], EXCA_CSC);
        if (csc & EXCA_CSC_READY) {
            atomic_add_int(&sc->powerintr, 1);
            wakeup((void *)&sc->powerintr);
            retval = FILTER_HANDLED;
        }
    }

    /*
     * Read the socket event.  Sometimes, the theory goes, the PCI bus is
     * so loaded that it cannot satisfy the read request, so we get
     * garbage back from the following read.  We have to filter out the
     * garbage so that we don't spontaneously reset the card under high
     * load.  PCI isn't supposed to act like this.  No doubt this is a bug
     * in the PCI bridge chipset (or cbb bridge) that's being used in
     * certain amd64 laptops today.  Work around the issue by assuming
     * that any bits we don't know about being set means that we got
     * garbage.
     */
    sockevent = cbb_get(sc, CBB_SOCKET_EVENT);
    if (sockevent != 0 && (sockevent & ~CBB_SOCKET_EVENT_VALID_MASK) == 0) {
        /*
         * If anything has happened to the socket, we assume that the
         * card is no longer OK, and we shouldn't call its ISR.  We
         * set cardok as soon as we've attached the card.  This helps
         * in a noisy eject, which happens all too often when users
         * are ejecting their PC Cards.
         *
         * We use this method in preference to checking to see if the
         * card is still there because the check suffers from a race
         * condition in the bouncing case.
         */
#define DELTA (CBB_SOCKET_MASK_CD)
        if (sockevent & DELTA) {
            cbb_clrb(sc, CBB_SOCKET_MASK, DELTA);
            cbb_set(sc, CBB_SOCKET_EVENT, DELTA);
            sc->cardok = 0;
            cbb_disable_func_intr(sc);
            wakeup(&sc->intrhand);
        }
#undef DELTA

        /*
         * Wakeup anybody waiting for a power interrupt.  We have to
         * use atomic_add_int for wakeups on other cores.
         */
        if (sockevent & CBB_SOCKET_EVENT_POWER) {
            cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_EVENT_POWER);
            cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_POWER);
            atomic_add_int(&sc->powerintr, 1);
            wakeup((void *)&sc->powerintr);
        }

        /*
         * Status change interrupts aren't presently used in the
         * rest of the driver.  For now, just ACK them.
         */
        if (sockevent & CBB_SOCKET_EVENT_CSTS)
            cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_CSTS);
        retval = FILTER_HANDLED;
    }
    return retval;
}
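
As the first comment notes, the power-interrupt sleeper keys off the powerintr counter rather than the wakeup itself, which is why a double wakeup is benign. A hedged sketch of that wait loop (names beyond sc->powerintr are illustrative, not the verbatim driver):

static int
cbb_wait_power_sketch(struct cbb_softc *sc, int timeout_ticks)
{
    int cnt = sc->powerintr;	/* snapshot before sleeping */

    /* The counter, not the wakeup, is the condition being waited on. */
    while (cnt == sc->powerintr) {
        if (tsleep((void *)&sc->powerintr, 0, "cbbpwr",
            timeout_ticks) == EWOULDBLOCK)
            return (ETIMEDOUT);	/* no power event arrived in time */
    }
    return (0);
}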
Beispiel #25
0
static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pt_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct pt_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state) {
	case PT_CCB_BUFFER_IO:
	case PT_CCB_BUFFER_IO_UA:
	{
		struct buf *bp;
		int    oldspl;

		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;
			
			if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			sf |= SF_RETRY_SELTO;

			if ((error = pterror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our device
					 * as invalid.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating device\n");
					softc->flags |= PT_FLAG_DEVICE_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bufq_first(&softc->buf_queue))
					!= NULL) {
					bufq_remove(&softc->buf_queue, q_bp);
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bp);
				}
				splx(s);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->b_resid = csio->resid;
			if (bp->b_resid != 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		devstat_end_transaction_buf(&softc->device_stats, bp);
		biodone(bp);
		break;
	}
	case PT_CCB_WAITING:
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	xpt_release_ccb(done_ccb);
}
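
The PT_CCB_WAITING case pairs with CAM's synchronous-request path, where the requester sleeps on the address of the CCB's completion-callback pointer until the done routine wakes it. A sketch along the lines of cam_periph_ccbwait(), recalled rather than verbatim:

void
cam_periph_ccbwait_sketch(union ccb *ccb)
{
	int s;

	s = splsoftcam();
	/* Sleep on &ccb_h.cbfcnp, the channel ptdone() wakes above. */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
		tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);
	splx(s);
}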
Beispiel #26
0
/*
 * icintr()
 */
static int
icintr(device_t dev, int event, char *ptr)
{
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
	struct mbuf *top;
	int len;

	mtx_lock(&sc->ic_lock);

	switch (event) {

	case INTR_GENERAL:
	case INTR_START:
		sc->ic_cp = sc->ic_ifbuf;
		sc->ic_xfercnt = 0;
		sc->ic_flags |= IC_IFBUF_BUSY;
		break;

	case INTR_STOP:

		/* If any error occurred during the transfer,
		 * drop the packet. */
		sc->ic_flags &= ~IC_IFBUF_BUSY;
		if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) ==
		    IC_BUFFER_WAITER)
			wakeup(sc);	/* channel must match the msleep(sc, ...) sleeper */
		if (sc->ic_iferrs)
			goto err;
		if ((len = sc->ic_xfercnt) == 0)
			break;					/* ignore */
		if (len <= ICHDRLEN)
			goto err;
		len -= ICHDRLEN;
		sc->ic_ifp->if_ipackets++;
		sc->ic_ifp->if_ibytes += len;
		BPF_TAP(sc->ic_ifp, sc->ic_ifbuf, len + ICHDRLEN);
		top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, sc->ic_ifp, 0);
		if (top) {
			mtx_unlock(&sc->ic_lock);
			M_SETFIB(top, sc->ic_ifp->if_fib);
			netisr_dispatch(NETISR_IP, top);
			mtx_lock(&sc->ic_lock);
		}
		break;
	err:
		if_printf(sc->ic_ifp, "errors (%d)!\n", sc->ic_iferrs);
		sc->ic_iferrs = 0;			/* reset error count */
		sc->ic_ifp->if_ierrors++;
		break;

	case INTR_RECEIVE:
		if (sc->ic_xfercnt >= sc->ic_ifp->if_mtu + ICHDRLEN) {
			sc->ic_iferrs++;
		} else {
			*sc->ic_cp++ = *ptr;
			sc->ic_xfercnt++;
		}
		break;

	case INTR_NOACK:			/* xfer terminated by master */
		break;

	case INTR_TRANSMIT:
		*ptr = 0xff;					/* XXX */
	  	break;

	case INTR_ERROR:
		sc->ic_iferrs++;
		break;

	default:
		panic("%s: unknown event (%d)!", __func__, event);
	}

	mtx_unlock(&sc->ic_lock);
	return (0);
}
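
icintr() above and icoutput() below both hand the shared buffers back with a wakeup on the softc pointer; the waiting side presumably looks something like the sketch below, using the same flag handshake (the function name and wmesg are illustrative). Note that the sleep channel is the softc itself, which is why the wakeups use sc rather than the address of a stack variable:

static int
ic_wait_buffers_sketch(struct ic_softc *sc)
{
	int error = 0;

	mtx_lock(&sc->ic_lock);
	while (sc->ic_flags & IC_BUFFERS_BUSY) {
		sc->ic_flags |= IC_BUFFER_WAITER;
		/* PCATCH: a signal aborts the wait with EINTR/ERESTART */
		error = msleep(sc, &sc->ic_lock, PCATCH, "icbuf", 0);
		if (error != 0)
			break;
	}
	sc->ic_flags &= ~IC_BUFFER_WAITER;
	mtx_unlock(&sc->ic_lock);
	return (error);
}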
Beispiel #27
0
/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
edmcastrategy(struct buf *bp)
{
    struct ed_softc *ed;
    struct disklabel *lp;
    daddr_t blkno;

    ed = device_lookup_private(&ed_cd, DISKUNIT(bp->b_dev));
    lp = ed->sc_dk.dk_label;

    ATADEBUG_PRINT(("edmcastrategy (%s)\n", device_xname(ed->sc_dev)),
                   DEBUG_XFERS);

    /* Valid request?  */
    if (bp->b_blkno < 0 ||
            (bp->b_bcount % lp->d_secsize) != 0 ||
            (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
        bp->b_error = EINVAL;
        goto done;
    }

    /* If device invalidated (e.g. media change, door open), error. */
    if ((ed->sc_flags & WDF_LOADED) == 0) {
        bp->b_error = EIO;
        goto done;
    }

    /* If it's a null transfer, return immediately. */
    if (bp->b_bcount == 0)
        goto done;

    /*
     * Do bounds checking and adjust the transfer.  If there is an
     * error, process it.  If it is the end of the partition, just return.
     */
    if (DISKPART(bp->b_dev) != RAW_PART &&
            bounds_check_with_label(&ed->sc_dk, bp,
                                    (ed->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
        goto done;

    /*
     * Now convert the block number to absolute and put it in
     * terms of the device's logical block size.
     */
    if (lp->d_secsize >= DEV_BSIZE)
        blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
    else
        blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

    if (DISKPART(bp->b_dev) != RAW_PART)
        blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

    bp->b_rawblkno = blkno;

    /* Queue transfer on drive, activate drive and controller if idle. */
    mutex_enter(&ed->sc_q_lock);
    bufq_put(ed->sc_q, bp);
    mutex_exit(&ed->sc_q_lock);

    /* Ring the worker thread */
    wakeup(ed->edc_softc);

    return;
done:
    /* Toss transfer; we're done early. */
    bp->b_resid = bp->b_bcount;
    biodone(bp);
}
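
The wakeup(ed->edc_softc) rings the controller's worker thread, which presumably idles in tsleep() on the controller softc and drains the queued buffers. A hypothetical sketch (edc_next_buf() and edc_do_xfer() are made-up helper names, not the real driver's):

static struct buf *edc_next_buf(struct edc_mca_softc *);	/* hypothetical */
static void edc_do_xfer(struct edc_mca_softc *, struct buf *);	/* hypothetical */

static void
edcworker_sketch(void *arg)
{
    struct edc_mca_softc *sc = arg;
    struct buf *bp;

    for (;;) {
        /* Sleep until a strategy routine queues work and rings us. */
        (void)tsleep(sc, PRIBIO, "edcidle", 0);

        while ((bp = edc_next_buf(sc)) != NULL)
            edc_do_xfer(sc, bp);
    }
}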
Beispiel #28
0
/*
 * icoutput()
 */
static int
icoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct ic_softc *sc = ifp->if_softc;
	device_t icdev = sc->ic_dev;
	device_t parent = device_get_parent(icdev);
	int len, sent;
	struct mbuf *mm;
	u_char *cp;
	u_int32_t hdr;

	/* BPF writes need to be handled specially. */ 
	if (dst->sa_family == AF_UNSPEC)
		bcopy(dst->sa_data, &hdr, sizeof(hdr));
	else 
		hdr = dst->sa_family;

	mtx_lock(&sc->ic_lock);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* already sending? */
	if (sc->ic_flags & IC_SENDING) {
		ifp->if_oerrors++;
		goto error;
	}
		
	/* insert header */
	bcopy ((char *)&hdr, sc->ic_obuf, ICHDRLEN);

	cp = sc->ic_obuf + ICHDRLEN;
	len = 0;
	mm = m;
	do {
		if (len + mm->m_len > sc->ic_ifp->if_mtu) {
			/* packet too large */
			ifp->if_oerrors++;
			goto error;
		}
			
		bcopy(mtod(mm,char *), cp, mm->m_len);
		cp += mm->m_len;
		len += mm->m_len;

	} while ((mm = mm->m_next));

	BPF_MTAP2(ifp, &hdr, sizeof(hdr), m);

	sc->ic_flags |= (IC_SENDING | IC_OBUF_BUSY);

	m_freem(m);
	mtx_unlock(&sc->ic_lock);

	/* send the packet */
	if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf,
				len + ICHDRLEN, &sent))
		ifp->if_oerrors++;
	else {
		ifp->if_opackets++;
		ifp->if_obytes += len;
	}	

	mtx_lock(&sc->ic_lock);
	sc->ic_flags &= ~(IC_SENDING | IC_OBUF_BUSY);
	if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) ==
	    IC_BUFFER_WAITER)
		wakeup(sc);	/* same channel the buffer waiter sleeps on */
	mtx_unlock(&sc->ic_lock);

	return (0);

error:
	m_freem(m);
	mtx_unlock(&sc->ic_lock);

	return(0);
}
Beispiel #29
0
static int
tegra_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
{
    int rv, i;
    struct tegra_i2c_softc *sc;
    enum tegra_i2c_xfer_type xtype;

    sc = device_get_softc(dev);
    LOCK(sc);

    /* Get the bus. */
    while (sc->bus_inuse == 1)
        SLEEP(sc,  0);
    sc->bus_inuse = 1;

    rv = 0;
    for (i = 0; i < nmsgs; i++) {
        sc->msg = &msgs[i];
        sc->msg_idx = 0;
        sc->bus_err = 0;
        sc->done = 0;
        /* Check for valid parameters. */
        if (sc->msg == NULL || sc->msg->buf == NULL ||
                sc->msg->len == 0) {
            rv = EINVAL;
            break;
        }

        /* Get flags for next transfer. */
        if (i == (nmsgs - 1)) {
            if (msgs[i].flags & IIC_M_NOSTOP)
                xtype = XFER_CONTINUE;
            else
                xtype = XFER_STOP;
        } else {
            if (msgs[i + 1].flags & IIC_M_NOSTART)
                xtype = XFER_CONTINUE;
            else
                xtype = XFER_REPEAT_START;
        }
        tegra_i2c_start_msg(sc, sc->msg, xtype);
        if (cold)
            rv = tegra_i2c_poll(sc);
        else
            rv = msleep(&sc->done, &sc->mtx, PZERO, "iic",
                        I2C_REQUEST_TIMEOUT);

        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0);
        WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF);
        if (rv == 0)
            rv = sc->bus_err;
        if (rv != 0)
            break;
    }

    if (rv != 0) {
        tegra_i2c_hw_init(sc);
        tegra_i2c_flush_fifo(sc);
    }

    sc->msg = NULL;
    sc->msg_idx = 0;
    sc->bus_err = 0;
    sc->done = 0;

    /* Wake up the processes that are waiting for the bus. */
    sc->bus_inuse = 0;
    wakeup(sc);
    UNLOCK(sc);

    return (rv);
}
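
The LOCK()/UNLOCK()/SLEEP() macros are not shown in this excerpt. Plausible definitions, consistent with the msleep(&sc->done, &sc->mtx, ...) call in the loop above, would be along these lines (assumed, not verbatim):

#define LOCK(_sc)		mtx_lock(&(_sc)->mtx)
#define UNLOCK(_sc)		mtx_unlock(&(_sc)->mtx)
/* Sleep on the softc with the mutex dropped; wakeup(sc) releases it. */
#define SLEEP(_sc, _timo)	msleep((_sc), &(_sc)->mtx, 0, "i2cbuswait", (_timo))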
Beispiel #30
0
void
kbdirq(struct intrframe *fr){
    int c, i;
    const char *cs=0;
    struct Kbd *k = &term.kbd;	/* we only support one, for now */

    c = inb( k->port );

    /* make or break */
    if( c & 0x80 ){
        /* break */
        c &= 0x7f;

        switch( scan_codes[c].type ){
        case SCROLL:
            k->state &= ~SCROLL;
            break;
        case SHIFT:
            k->state &= ~SHIFT;
            break;
        case ALT:
            k->state &= ~ALT;
            break;
        case CTL:
            k->state &= ~CTL;
            break;
        }
    }else{
        /* make */
        switch( scan_codes[c].type ){

        case SHIFT:
            k->state |= SHIFT;
            break;
        case ALT:
            k->state |= ALT;
            break;
        case CTL:
            k->state |= CTL;
            break;
        case NUM:
            k->state ^= NUM;
            break;
        case CAPS:
            k->state ^= CAPS;
            break;
        case NONE:
            break;
        case ASCII:
        case FUNC:
            if( k->state & CTL )
                cs = scan_codes[c].ctl;
            else if( k->state & (SHIFT | CAPS) )
                cs = scan_codes[c].shift;
            else
                cs = scan_codes[c].unshift;
            break;

        case KP:
            if( c == 83 && k->state & CTL && k->state & ALT ){
                /* ctl-alt-del detected */
#ifdef USE_GDB
                if( bootflags & BOOT_USEGDB )
                    breakpoint();
                else
#endif
                {
                    E9PRINTF(("\n<C-A-Del>\nrebooting\n"));
                    kprintf("\nrebooting...");
                    reboot();
                }

            }
            if( k->state & CTL )
                cs = scan_codes[c].ctl;
            else if( k->state & (SHIFT | NUM) )
                cs = scan_codes[c].shift;
            else
                cs = scan_codes[c].unshift;
            break;


        }

        /* special control char ? */
        for(i=0; i<sizeof(term.file.cchars); i++){
            if(cs && term.file.cchars[i] && *cs == term.file.cchars[i]){
                sigunblock( term.file.ccpid );
                ksendmsg( term.file.ccpid, MSG_CCHAR_0 + i );
                return;
            }
        }

        /* enqueue chars */
        while( cs && *cs ){
            if( k->len < PCTERM_QUEUE_SIZE ){
                k->queue[ k->head++ ] = (k->state & ALT) ? (0x80 | *cs) : *cs;
                k->head %= PCTERM_QUEUE_SIZE;
                k->len ++;
            }
            /* else just drop it */

            cs++;
        }
        wakeup(k);
    }
}
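
A consumer of the queue filled above would sleep on the Kbd structure until kbdirq()'s wakeup(k) fires, then pop the oldest character from the ring. A sketch, assuming this kernel offers a tsleep-style primitive and spl-style interrupt masking (both assumptions; names are illustrative):

int
kbd_getchar_sketch(struct Kbd *k)
{
    int c, plx;

    plx = splhigh();			/* assumed spl-style irq masking */
    while (k->len == 0)
        tsleep(k, 0, "kbdread", 0);	/* assumed counterpart of wakeup(k) */

    /* The oldest char sits len slots behind head in the ring. */
    c = k->queue[(k->head + PCTERM_QUEUE_SIZE - k->len) % PCTERM_QUEUE_SIZE];
    k->len--;

    splx(plx);
    return c;
}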