Example #1
static void
wdc_atapi_phase_complete(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct atac_softc *atac = chp->ch_atac;
#if NATA_DMA || NATA_PIOBM
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
#endif
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	/* wait for DSC if needed */
	if (drvp->drive_flags & ATA_DRIVE_ATAPIDSCW) {
		ATADEBUG_PRINT(("wdc_atapi_phase_complete(%s:%d:%d) "
		    "polldsc %d\n", device_xname(atac->atac_dev),
		    chp->ch_channel,
		    xfer->c_drive, xfer->c_dscpoll), DEBUG_XFERS);
#if 1
		if (cold)
			panic("wdc_atapi_phase_complete: cold");
#endif
		if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10,
		    AT_POLL) == WDCWAIT_TOUT) {
			/* 10ms not enough, try again in 1 tick */
			if (xfer->c_dscpoll++ >
			    mstohz(sc_xfer->timeout)) {
				printf("%s:%d:%d: wait_for_dsc "
				    "failed\n",
				    device_xname(atac->atac_dev),
				    chp->ch_channel, xfer->c_drive);
				sc_xfer->error = XS_TIMEOUT;
				wdc_atapi_reset(chp, xfer);
				return;
			} else
				callout_reset(&chp->ch_callout, 1,
				    wdc_atapi_polldsc, xfer);
			return;
		}
	}

	/*
	 * Some drives occasionally set WDCS_ERR with
	 * "ATA illegal length indication" in the error
	 * register.  If we read some data, the sense is
	 * valid anyway, so don't report the error.
	 */
	if (chp->ch_status & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		/* save the short sense */
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = chp->ch_error;
		if ((sc_xfer->xs_periph->periph_quirks &
		    PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		}
#if NATA_DMA || NATA_PIOBM
		else if (wdc->dma_status &
		    (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) {
#if NATA_DMA
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
#endif
			sc_xfer->error = XS_RESET;
			wdc_atapi_reset(chp, xfer);
			return;
		}
#endif
	}
	if (xfer->c_bcount != 0) {
		ATADEBUG_PRINT(("wdc_atapi_intr: bcount value is "
		    "%d after io\n", xfer->c_bcount), DEBUG_XFERS);
	}
#ifdef DIAGNOSTIC
	if (xfer->c_bcount < 0) {
		printf("wdc_atapi_intr warning: bcount value "
		    "is %d after io\n", xfer->c_bcount);
	}
#endif
	ATADEBUG_PRINT(("wdc_atapi_phase_complete: wdc_atapi_done(), "
	    "error 0x%x sense 0x%x\n", sc_xfer->error,
	    sc_xfer->sense.atapi_sense), DEBUG_INTR);
	wdc_atapi_done(chp, xfer);
}
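
The DSC wait above shows the classic bounded-poll idiom: when the 10 ms busy-wait times out, the transfer is rescheduled one tick later with callout_reset(), and a per-transfer counter compared against mstohz(timeout) caps the total time spent polling. Below is a minimal sketch of the same shape, assuming NetBSD-style callout(9) and mstohz(); my_softc, my_poll, my_condition_met and my_giveup are hypothetical names.

/*
 * Bounded tick-granularity poll (sketch).  my_softc, my_poll,
 * my_condition_met and my_giveup are hypothetical.
 */
struct my_softc {
	struct callout	sc_callout;
	int		sc_polls;	/* ticks spent polling so far */
	int		sc_timo_ms;	/* overall budget, in milliseconds */
};

static void
my_poll(void *arg)
{
	struct my_softc *sc = arg;

	if (my_condition_met(sc))
		return;			/* done; do not reschedule */
	if (sc->sc_polls++ > mstohz(sc->sc_timo_ms)) {
		my_giveup(sc);		/* hard timeout, as above */
		return;
	}
	/* not ready yet: check again in one tick */
	callout_reset(&sc->sc_callout, 1, my_poll, sc);
}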
Example #2
/*
 * TDMA state machine handler.
 */
static int
tdma_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211_tdma_state *ts = vap->iv_tdma;
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_state ostate;
	int status;

	IEEE80211_LOCK_ASSERT(ic);

	ostate = vap->iv_state;
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
	    __func__, ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate], arg);

	if (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS)
		callout_stop(&vap->iv_swbmiss);
	if (nstate == IEEE80211_S_SCAN &&
	    (ostate == IEEE80211_S_INIT || ostate == IEEE80211_S_RUN) &&
	    ts->tdma_slot != 0) {
		/*
		 * Override adhoc behaviour when operating as a slave;
		 * we need to scan even if the channel is locked.
		 */
		vap->iv_state = nstate;			/* state transition */
		ieee80211_cancel_scan(vap);		/* background scan */
		if (ostate == IEEE80211_S_RUN) {
			/* purge station table; entries are stale */
			ieee80211_iterate_nodes(&ic->ic_sta, sta_leave, vap);
		}
		if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
			ieee80211_check_scan(vap,
			    vap->iv_scanreq_flags,
			    vap->iv_scanreq_duration,
			    vap->iv_scanreq_mindwell,
			    vap->iv_scanreq_maxdwell,
			    vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
			vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
		} else
			ieee80211_check_scan_current(vap);
		status = 0;
	} else {
		status = ts->tdma_newstate(vap, nstate, arg);
	}
	if (status == 0 && 
	    nstate == IEEE80211_S_RUN && ostate != IEEE80211_S_RUN &&
	    (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) &&
	    ts->tdma_slot != 0 &&
	    vap->iv_des_chan == IEEE80211_CHAN_ANYC) {
		/*
		 * Start s/w beacon miss timer for slave devices w/o
		 * hardware support.  Note we do this only if we're
		 * not locked to a channel (i.e. roam to follow the
		 * master). The 2x is a fudge for our doing this in
		 * software.
		 */
		vap->iv_swbmiss_period = IEEE80211_TU_TO_TICKS(
		    2 * vap->iv_bmissthreshold * ts->tdma_bintval *
		    ((ts->tdma_slotcnt * ts->tdma_slotlen) / 1024));
		vap->iv_swbmiss_count = 0;
		callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period,
			ieee80211_swbmiss, vap);
	}
	return status;
}
Example #3
/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
bsd_tcp_output(struct tcpcb *tp)
{
    struct socket *so = tp->t_inpcb->inp_socket;
    long len, recwin, sendwin;
    int off, flags, error;
#ifdef TCP_SIGNATURE
    int sigoff = 0;
#endif
    struct mbuf *m;
    struct ip *ip = NULL;
    struct ipovly *ipov = NULL;
    struct tcphdr *th;
    u_char opt[TCP_MAXOLEN];
    unsigned ipoptlen, optlen, hdrlen;
    int idle, sendalot;
    int i, sack_rxmit;
    struct sackhole *p;
#if 0
    int maxburst = TCP_MAXBURST;
#endif
    struct rmxp_tao tao;
#ifdef INET6
    struct ip6_hdr *ip6 = NULL;
    int isipv6;

    isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif
    bzero(&tao, sizeof(tao));   /* tao is read below even without INET6 */

    INP_LOCK_ASSERT(tp->t_inpcb);

    /*
     * Determine length of data that should be transmitted,
     * and flags that will be used.
     * If there is some data or critical controls (SYN, RST)
     * to send, then transmit; otherwise, investigate further.
     */
    idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
    if (idle && (ticks - tp->t_rcvtime) >= tp->t_rxtcur)
    {
        /*
         * We have been idle for "a while" and no acks are
         * expected to clock out any data we send --
         * slow start to get ack "clock" running again.
         *
         * Set the slow-start flight size depending on whether
         * this is a local network or not.
         */
        int ss = ss_fltsz;
#ifdef INET6
        if (isipv6)
        {
            if (in6_localaddr(&tp->t_inpcb->in6p_faddr))
                ss = ss_fltsz_local;
        }
        else
#endif /* INET6 */
            if (in_localaddr(tp->t_inpcb->inp_faddr))
                ss = ss_fltsz_local;
        tp->snd_cwnd = tp->t_maxseg * ss;
    }
    tp->t_flags &= ~TF_LASTIDLE;
    if (idle)
    {
        if (tp->t_flags & TF_MORETOCOME)
        {
            tp->t_flags |= TF_LASTIDLE;
            idle = 0;
        }
    }
again:
    /*
     * If we've recently taken a timeout, snd_max will be greater than
     * snd_nxt.  There may be SACK information that allows us to avoid
     * resending already delivered data.  Adjust snd_nxt accordingly.
     */
    if (tp->sack_enable && SEQ_LT(tp->snd_nxt, tp->snd_max))
        tcp_sack_adjust(tp);
    sendalot = 0;
    off = tp->snd_nxt - tp->snd_una;
    sendwin = min(tp->snd_wnd, tp->snd_cwnd);
    sendwin = min(sendwin, tp->snd_bwnd);

    flags = tcp_outflags[tp->t_state];
    /*
     * Send any SACK-generated retransmissions.  If we're explicitly trying
     * to send out new data (when sendalot is 1), bypass this function.
     * If we retransmit in fast recovery mode, decrement snd_cwnd, since
     * we're replacing a (future) new transmission with a retransmission
     * now, and we previously incremented snd_cwnd in tcp_input().
     */
    /*
     * Still in SACK recovery: reset the rxmit flag to zero.
     */
    sack_rxmit = 0;
    len = 0;
    p = NULL;
    if (tp->sack_enable && IN_FASTRECOVERY(tp) &&
            (p = tcp_sack_output(tp)))
    {
        //KASSERT(tp->snd_cwnd >= 0,
        //	("%s: CWIN is negative : %ld", __FUNCTION__, tp->snd_cwnd));
        /* Do not retransmit SACK segments beyond snd_recover */
        if (SEQ_GT(p->end, tp->snd_recover))
        {
            /*
             * (At least) part of sack hole extends beyond
             * snd_recover. Check to see if we can rexmit data
             * for this hole.
             */
            if (SEQ_GEQ(p->rxmit, tp->snd_recover))
            {
                /*
                 * Can't rexmit any more data for this hole.
                 * That data will be rexmitted in the next
                 * sack recovery episode, when snd_recover
                 * moves past p->rxmit.
                 */
                p = NULL;
                goto after_sack_rexmit;
            }
            else
                /* Can rexmit part of the current hole */
                len = ((long)ulmin(tp->snd_cwnd,
                                   tp->snd_recover - p->rxmit));
        }
        else
            len = ((long)ulmin(tp->snd_cwnd, p->end - p->rxmit));
        off = p->rxmit - tp->snd_una;
        //KASSERT(off >= 0,("%s: sack block to the left of una : %d",
        //    __func__, off));
        if (len > 0)
        {
            sack_rxmit = 1;
            sendalot = 1;
            tcpstat.tcps_sack_rexmits++;
            tcpstat.tcps_sack_rexmit_bytes +=
                min(len, tp->t_maxseg);
        }
    }
after_sack_rexmit:
    /*
     * Get standard flags, and add SYN or FIN if requested by 'hidden'
     * state flags.
     */
    if (tp->t_flags & TF_NEEDFIN)
        flags |= TH_FIN;
    if (tp->t_flags & TF_NEEDSYN)
        flags |= TH_SYN;

    SOCKBUF_LOCK(&so->so_snd);
    /*
     * If in persist timeout with window of 0, send 1 byte.
     * Otherwise, if window is small but nonzero
     * and timer expired, we will send what we can
     * and go to transmit state.
     */
    if (tp->t_force)
    {
        if (sendwin == 0)
        {
            /*
             * If we still have some data to send, then
             * clear the FIN bit.  Usually this would
             * happen below when it realizes that we
             * aren't sending all the data.  However,
             * if we have exactly 1 byte of unsent data,
             * then it won't clear the FIN bit below,
             * and if we are in persist state, we wind
             * up sending the packet without recording
             * that we sent the FIN bit.
             *
             * We can't just blindly clear the FIN bit,
             * because if we don't have any more data
             * to send then the probe will be the FIN
             * itself.
             */
            if (off < so->so_snd.sb_cc)
                flags &= ~TH_FIN;
            sendwin = 1;
        }
        else
        {
            callout_stop(tp->tt_persist);
            tp->t_rxtshift = 0;
        }
    }

    /*
     * If snd_nxt == snd_max and we have transmitted a FIN, the
     * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
     * a negative length.  This can also occur when TCP opens up
     * its congestion window while receiving additional duplicate
     * acks after fast-retransmit because TCP will reset snd_nxt
     * to snd_max after the fast-retransmit.
     *
     * In the normal retransmit-FIN-only case, however, snd_nxt will
     * be set to snd_una, the offset will be 0, and the length may
     * wind up 0.
     *
     * If sack_rxmit is true we are retransmitting from the scoreboard
     * in which case len is already set.
     */
    if (!sack_rxmit)
        len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);

    /*
     * Lop off SYN bit if it has already been sent.  However, if this
     * is SYN-SENT state and if segment contains data and if we don't
     * know that foreign host supports TAO, suppress sending segment.
     */
    if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una))
    {
        flags &= ~TH_SYN;
        off--, len++;
        if (tcp_do_rfc1644)
            tcp_hc_gettao(&tp->t_inpcb->inp_inc, &tao);
        if (len > 0 && tp->t_state == TCPS_SYN_SENT &&
                tao.tao_ccsent == 0)
            goto just_return;
    }

    /*
     * Be careful not to send data and/or FIN on SYN segments
     * in cases when no CC option will be sent.
     * This measure is needed to prevent interoperability problems
     * with not fully conformant TCP implementations.
     */
    if ((flags & TH_SYN) &&
            ((tp->t_flags & TF_NOOPT) || !(tp->t_flags & TF_REQ_CC) ||
             ((flags & TH_ACK) && !(tp->t_flags & TF_RCVD_CC))))
    {
        len = 0;
        flags &= ~TH_FIN;
    }

    if (len < 0)
    {
        /*
         * If FIN has been sent but not acked,
         * but we haven't been called to retransmit,
         * len will be < 0.  Otherwise, window shrank
         * after we sent into it.  If window shrank to 0,
         * cancel pending retransmit, pull snd_nxt back
         * to (closed) window, and set the persist timer
         * if it isn't already going.  If the window didn't
         * close completely, just wait for an ACK.
         */
        len = 0;
        if (sendwin == 0)
        {
            callout_stop(tp->tt_rexmt);
            tp->t_rxtshift = 0;
            tp->snd_nxt = tp->snd_una;
            if (!callout_active(tp->tt_persist))
                tcp_setpersist(tp);
        }
    }

    /*
     * len will be >= 0 after this point.  Truncate to the maximum
     * segment length and ensure that FIN is removed if the length
     * no longer contains the last data byte.
     */
    if (len > tp->t_maxseg)
    {
        len = tp->t_maxseg;
        sendalot = 1;
    }
    if (sack_rxmit)
    {
        if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
            flags &= ~TH_FIN;
    }
    else
    {
        if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
            flags &= ~TH_FIN;
    }

    recwin = sbspace(&so->so_rcv);

    /*
     * Sender silly window avoidance.   We transmit under the following
     * conditions when len is non-zero:
     *
     *	- We have a full segment
     *	- This is the last buffer in a write()/send() and we are
     *	  either idle or running NODELAY
     *	- we've timed out (e.g. persist timer)
     *	- we have more than 1/2 the maximum send window's worth of
     *	  data (the receiver may be limiting the window size)
     *	- we need to retransmit
     */
    if (len)
    {
        if (len == tp->t_maxseg)
            goto send;
        /*
         * NOTE! on localhost connections an 'ack' from the remote
         * end may occur synchronously with the output and cause
         * us to flush a buffer queued with moretocome.  XXX
         *
         * note: the len + off check is almost certainly unnecessary.
         */
        if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
                (idle || (tp->t_flags & TF_NODELAY)) &&
                len + off >= so->so_snd.sb_cc &&
                (tp->t_flags & TF_NOPUSH) == 0)
        {
            goto send;
        }
        if (tp->t_force)			/* typ. timeout case */
            goto send;
        if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
            goto send;
        if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
            goto send;
        if (sack_rxmit)
            goto send;
    }

    /*
     * Compare available window to amount of window
     * known to peer (as advertised window less
     * next expected input).  If the difference is at least two
     * max size segments, or at least 50% of the maximum possible
     * window, then want to send a window update to peer.
     * Skip this if the connection is in T/TCP half-open state.
     */
    if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN))
    {
        /*
         * "adv" is the amount we can increase the window,
         * taking into account that we are limited by
         * TCP_MAXWIN << tp->rcv_scale.
         */
        long adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale) -
                   (tp->rcv_adv - tp->rcv_nxt);

        if (adv >= (long) (2 * tp->t_maxseg))
            goto send;
        if (2 * adv >= (long) so->so_rcv.sb_hiwat)
            goto send;
    }

    /*
     * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
     * is also a catch-all for the retransmit timer timeout case.
     */
    if (tp->t_flags & TF_ACKNOW)
        goto send;
    if ((flags & TH_RST) ||
            ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
        goto send;
    if (SEQ_GT(tp->snd_up, tp->snd_una))
        goto send;
    /*
     * If our state indicates that FIN should be sent
     * and we have not yet done so, then we need to send.
     */
    if (flags & TH_FIN &&
            ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
        goto send;
    /*
     * In SACK, it is possible for tcp_output to fail to send a segment
     * after the retransmission timer has been turned off.  Make sure
     * that the retransmission timer is set.
     */
    if (tp->sack_enable && SEQ_GT(tp->snd_max, tp->snd_una) &&
            !callout_active(tp->tt_rexmt) &&
            !callout_active(tp->tt_persist))
    {
        callout_reset(tp->tt_rexmt, tp->t_rxtcur,
                      tcp_timer_rexmt, tp);
        goto just_return;
    }
    /*
     * TCP window updates are not reliable, rather a polling protocol
     * using ``persist'' packets is used to ensure receipt of window
     * updates.  The three ``states'' for the output side are:
     *	idle			not doing retransmits or persists
     *	persisting		to move a small or zero window
     *	(re)transmitting	and thereby not persisting
     *
     * callout_active(tp->tt_persist)
     *	is true when we are in persist state.
     * tp->t_force
     *	is set when we are called to send a persist packet.
     * callout_active(tp->tt_rexmt)
     *	is set when we are retransmitting
     * The output side is idle when both timers are zero.
     *
     * If send window is too small, there is data to transmit, and no
     * retransmit or persist is pending, then go to persist state.
     * If nothing happens soon, send when timer expires:
     * if window is nonzero, transmit what we can,
     * otherwise force out a byte.
     */
    //if (so->so_snd.sb_cc && !callout_active(tp->tt_rexmt) &&
    //    !callout_active(tp->tt_persist)) {
    //	tp->t_rxtshift = 0;
    //	tcp_setpersist(tp);
    //}

    /*
     * No reason to send a segment, just return.
     */
just_return:
    SOCKBUF_UNLOCK(&so->so_snd);
    return (0);

send:
    SOCKBUF_LOCK_ASSERT(&so->so_snd);
    /*
     * Before ESTABLISHED, force sending of initial options
     * unless TCP set not to do any options.
     * NOTE: we assume that the IP/TCP header plus TCP options
     * always fit in a single mbuf, leaving room for a maximum
     * link header, i.e.
     *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
     */
    optlen = 0;
#ifdef INET6
    if (isipv6)
        hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
    else
#endif
        hdrlen = sizeof (struct tcpiphdr);
    if (flags & TH_SYN)
    {
        tp->snd_nxt = tp->iss;
        if ((tp->t_flags & TF_NOOPT) == 0)
        {
            u_short mss;

            opt[0] = TCPOPT_MAXSEG;
            opt[1] = TCPOLEN_MAXSEG;
            mss = htons((u_short) tcp_mssopt(&tp->t_inpcb->inp_inc));
            (void)memcpy(opt + 2, &mss, sizeof(mss));
            optlen = TCPOLEN_MAXSEG;

            /*
             * If this is the first SYN of connection (not a SYN
             * ACK), include SACK_PERMIT_HDR option.  If this is a
             * SYN ACK, include SACK_PERMIT_HDR option if peer has
             * already done so. This is only for active connect,
             * since the syncache takes care of the passive connect.
             */
            if (tp->sack_enable && ((flags & TH_ACK) == 0 ||
                                    (tp->t_flags & TF_SACK_PERMIT)))
            {
                *((u_int32_t *) (opt + optlen)) =
                    htonl(TCPOPT_SACK_PERMIT_HDR);
                optlen += 4;
            }
            if ((tp->t_flags & TF_REQ_SCALE) &&
                    ((flags & TH_ACK) == 0 ||
                     (tp->t_flags & TF_RCVD_SCALE)))
            {
                *((u_int32_t *)(opt + optlen)) = htonl(
                                                     TCPOPT_NOP << 24 |
                                                     TCPOPT_WINDOW << 16 |
                                                     TCPOLEN_WINDOW << 8 |
                                                     tp->request_r_scale);
                optlen += 4;
            }
        }
    }

    /*
     * Send a timestamp and echo-reply if this is a SYN and our side
     * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
     * and our peer have sent timestamps in our SYN's.
     */
    if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
            (flags & TH_RST) == 0 &&
            ((flags & TH_ACK) == 0 ||
             (tp->t_flags & TF_RCVD_TSTMP)))
    {
        u_int32_t *lp = (u_int32_t *)(opt + optlen);

        /* Form timestamp option as shown in appendix A of RFC 1323. */
        *lp++ = htonl(TCPOPT_TSTAMP_HDR);
        *lp++ = htonl(ticks);
        *lp   = htonl(tp->ts_recent);
        optlen += TCPOLEN_TSTAMP_APPA;
    }

    /*
     * Send SACKs if necessary.  This should be the last option processed.
     * Only as many SACKs are sent as are permitted by the maximum options
     * size.  No more than three SACKs are sent.
     */
    if (tp->sack_enable && tp->t_state == TCPS_ESTABLISHED &&
            (tp->t_flags & (TF_SACK_PERMIT | TF_NOOPT)) == TF_SACK_PERMIT &&
            tp->rcv_numsacks)
    {
        u_int32_t *lp = (u_int32_t *)(opt + optlen);
        u_int32_t *olp = lp++;
        int count = 0;  /* actual number of SACKs inserted */
        int maxsack = (MAX_TCPOPTLEN - (optlen + 4)) / TCPOLEN_SACK;

        tcpstat.tcps_sack_send_blocks++;
        maxsack = min(maxsack, TCP_MAX_SACK);
        for (i = 0; (i < tp->rcv_numsacks && count < maxsack); i++)
        {
            struct sackblk sack = tp->sackblks[i];
            if (sack.start == 0 && sack.end == 0)
                continue;
            *lp++ = htonl(sack.start);
            *lp++ = htonl(sack.end);
            count++;
        }
        *olp = htonl(TCPOPT_SACK_HDR | (TCPOLEN_SACK * count + 2));
        optlen += TCPOLEN_SACK * count + 4; /* including leading NOPs */
    }
    /*
     * Send `CC-family' options if our side wants to use them (TF_REQ_CC),
     * options are allowed (!TF_NOOPT) and it's not a RST.
     */
    if ((tp->t_flags & (TF_REQ_CC | TF_NOOPT)) == TF_REQ_CC &&
            (flags & TH_RST) == 0)
    {
        switch (flags & (TH_SYN | TH_ACK))
        {
            /*
             * This is a normal ACK, send CC if we received CC before
             * from our peer.
             */
        case TH_ACK:
            if (!(tp->t_flags & TF_RCVD_CC))
                break;
            /*FALLTHROUGH*/

            /*
             * We can only get here in T/TCP's SYN_SENT* state, when
             * we're sending a non-SYN segment without waiting for
             * the ACK of our SYN.  A check above assures that we only
             * do this if our peer understands T/TCP.
             */
        case 0:
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = TCPOPT_CC;
            opt[optlen++] = TCPOLEN_CC;
            *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);

            optlen += 4;
            break;

            /*
             * This is our initial SYN, check whether we have to use
             * CC or CC.new.
             */
        case TH_SYN:
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = tp->t_flags & TF_SENDCCNEW ?
                            TCPOPT_CCNEW : TCPOPT_CC;
            opt[optlen++] = TCPOLEN_CC;
            *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
            optlen += 4;
            break;

            /*
             * This is a SYN,ACK; send CC and CC.echo if we received
             * CC from our peer.
             */
        case (TH_SYN|TH_ACK):
            if (tp->t_flags & TF_RCVD_CC)
            {
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_CC;
                opt[optlen++] = TCPOLEN_CC;
                *(u_int32_t *)&opt[optlen] =
                    htonl(tp->cc_send);
                optlen += 4;
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_CCECHO;
                opt[optlen++] = TCPOLEN_CC;
                *(u_int32_t *)&opt[optlen] =
                    htonl(tp->cc_recv);
                optlen += 4;
            }
            break;
        }
    }

#ifdef TCP_SIGNATURE
#ifdef INET6
    if (!isipv6)
#endif
        if (tp->t_flags & TF_SIGNATURE)
        {
            int i;
            u_char *bp;

            /* Initialize TCP-MD5 option (RFC2385) */
            bp = (u_char *)opt + optlen;
            *bp++ = TCPOPT_SIGNATURE;
            *bp++ = TCPOLEN_SIGNATURE;
            sigoff = optlen + 2;
            for (i = 0; i < TCP_SIGLEN; i++)
                *bp++ = 0;
            optlen += TCPOLEN_SIGNATURE;

            /* Terminate options list and maintain 32-bit alignment. */
            *bp++ = TCPOPT_NOP;
            *bp++ = TCPOPT_EOL;
            optlen += 2;
        }
#endif /* TCP_SIGNATURE */

    hdrlen += optlen;

#ifdef INET6
    if (isipv6)
        ipoptlen = ip6_optlen(tp->t_inpcb);
    else
#endif
        if (tp->t_inpcb->inp_options)
            ipoptlen = tp->t_inpcb->inp_options->m_len -
                       offsetof(struct ipoption, ipopt_list);
        else
            ipoptlen = 0;
Example #4
static void
mv_gpio_debounce(void *arg)
{
	uint8_t raw_read, last_state;
	int pin;
	device_t dev;
	int *debounce_counter;
	struct mv_gpio_softc *sc;
	struct mv_gpio_pindev *s;

	s = (struct mv_gpio_pindev *)arg;
	dev = s->dev;
	pin = s->pin;
	sc = (struct mv_gpio_softc *)device_get_softc(dev);

	MV_GPIO_LOCK();

	raw_read = (mv_gpio_value_get(dev, pin, 1) ? 1 : 0);
	last_state = (mv_gpio_debounced_state_get(dev, pin) ? 1 : 0);
	debounce_counter = &sc->debounce_counters[pin];

	if (raw_read == last_state) {
		if (last_state)
			*debounce_counter = DEBOUNCE_HI_LO_MS /
			    DEBOUNCE_CHECK_MS;
		else
			*debounce_counter = DEBOUNCE_LO_HI_MS /
			    DEBOUNCE_CHECK_MS;

		callout_reset(sc->debounce_callouts[pin],
		    DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, arg);
	} else {
		*debounce_counter = *debounce_counter - 1;
		if (*debounce_counter != 0)
			callout_reset(sc->debounce_callouts[pin],
			    DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, arg);
		else {
			mv_gpio_debounced_state_set(dev, pin, raw_read);

			if (last_state)
				*debounce_counter = DEBOUNCE_HI_LO_MS /
				    DEBOUNCE_CHECK_MS;
			else
				*debounce_counter = DEBOUNCE_LO_HI_MS /
				    DEBOUNCE_CHECK_MS;

			if (((sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) &&
			    (raw_read == 0)) ||
			    (((sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) == 0) &&
			    raw_read) ||
			    (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE))
				mv_gpio_intr_handler(dev, pin);

			/* Toggle polarity for next edge. */
			mv_gpio_polarity(dev, pin, 0, 1);

			free(arg, M_DEVBUF);
			callout_deactivate(sc->debounce_callouts[pin]);
		}
	}

	MV_GPIO_UNLOCK();
}
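
mv_gpio_debounce() above is the countdown style of software debouncing: the pin is resampled at a fixed period, a bounce back to the old level restarts the countdown, and only when the counter reaches zero is the new level accepted and reported. A stripped-down sketch of that state machine follows, assuming FreeBSD callout(9); my_pin, sample_pin(), report_edge(), DEBOUNCE_COUNT and CHECK_TICKS are hypothetical.

/*
 * Countdown debounce (sketch).  my_pin, sample_pin(), report_edge(),
 * DEBOUNCE_COUNT and CHECK_TICKS are hypothetical stand-ins.
 */
struct my_pin {
	struct callout	callout;
	int		stable_state;	/* last debounced level */
	int		counter;	/* samples left before accepting */
};

static void
debounce_tick(void *arg)
{
	struct my_pin *p = arg;
	int raw = sample_pin(p);

	if (raw == p->stable_state) {
		/* bounced back: restart the countdown, keep sampling */
		p->counter = DEBOUNCE_COUNT;
		callout_reset(&p->callout, CHECK_TICKS, debounce_tick, arg);
	} else if (--p->counter > 0) {
		/* new level holding, but not for long enough yet */
		callout_reset(&p->callout, CHECK_TICKS, debounce_tick, arg);
	} else {
		p->stable_state = raw;	/* accept it as a real edge */
		report_edge(p);		/* no re-arm: back to idle */
	}
}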
Example #5
int
altera_jtag_uart_attach(struct altera_jtag_uart_softc *sc)
{
	struct tty *tp;
	int error;

	AJU_LOCK_INIT(sc);

	/*
	 * XXXRW: Currently, we detect the console solely based on it using a
	 * reserved address, and borrow console-level locks and buffer if so.
	 * Is there a better way?
	 */
	if (rman_get_start(sc->ajus_mem_res) == BERI_UART_BASE) {
		sc->ajus_lockp = &aju_cons_lock;
		sc->ajus_buffer_validp = &aju_cons_buffer_valid;
		sc->ajus_buffer_datap = &aju_cons_buffer_data;
		sc->ajus_jtag_presentp = &aju_cons_jtag_present;
		sc->ajus_jtag_missedp = &aju_cons_jtag_missed;
		sc->ajus_flags |= ALTERA_JTAG_UART_FLAG_CONSOLE;
	} else {
		sc->ajus_lockp = &sc->ajus_lock;
		sc->ajus_buffer_validp = &sc->ajus_buffer_valid;
		sc->ajus_buffer_datap = &sc->ajus_buffer_data;
		sc->ajus_jtag_presentp = &sc->ajus_jtag_present;
		sc->ajus_jtag_missedp = &sc->ajus_jtag_missed;
	}

	/*
	 * Disable interrupts regardless of whether or not we plan to use
	 * them.  We will register an interrupt handler now if they will be
	 * used, but not re-enable them until later once the remainder of the tty
	 * layer is properly initialised, as we're not ready for input yet.
	 */
	AJU_LOCK(sc);
	aju_intr_disable(sc);
	AJU_UNLOCK(sc);
	if (sc->ajus_irq_res != NULL) {
		error = bus_setup_intr(sc->ajus_dev, sc->ajus_irq_res,
		    INTR_ENTROPY | INTR_TYPE_TTY | INTR_MPSAFE, NULL,
		    aju_intr, sc, &sc->ajus_irq_cookie);
		if (error) {
			device_printf(sc->ajus_dev,
			    "could not activate interrupt\n");
			AJU_LOCK_DESTROY(sc);
			return (error);
		}
	}
	tp = sc->ajus_ttyp = tty_alloc(&aju_ttydevsw, sc);
	if (sc->ajus_flags & ALTERA_JTAG_UART_FLAG_CONSOLE) {
		aju_cons_sc = sc;
		tty_init_console(tp, 0);
	}
	tty_makedev(tp, NULL, "%s%d", AJU_TTYNAME, sc->ajus_unit);

	/*
	 * If we will be using interrupts, enable them now; otherwise, start
	 * polling.  From this point onwards, input can arrive.
	 */
	if (sc->ajus_irq_res != NULL) {
		AJU_LOCK(sc);
		aju_intr_readable_enable(sc);
		AJU_UNLOCK(sc);
	} else {
		callout_init(&sc->ajus_io_callout, CALLOUT_MPSAFE);
		callout_reset(&sc->ajus_io_callout, AJU_IO_POLLINTERVAL,
		    aju_io_callout, sc);
	}
	callout_init(&sc->ajus_ac_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->ajus_ac_callout, AJU_AC_POLLINTERVAL,
	    aju_ac_callout, sc);
	return (0);
}
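
The attach routine above chooses its input path at runtime: with an IRQ resource it registers a handler and enables interrupts only after tty_makedev(), and without one it falls back to a periodic callout. A sketch of that fallback, assuming FreeBSD callout(9); my_softc, my_io_poll, my_service_io, my_enable_intr and POLL_TICKS are hypothetical.

/*
 * Interrupt-or-poll fallback at attach time (sketch).  All my_*
 * names and POLL_TICKS are hypothetical.
 */
static void
my_io_poll(void *arg)
{
	struct my_softc *sc = arg;

	my_service_io(sc);
	/* re-arm: the callout stands in for the missing interrupt */
	callout_reset(&sc->io_callout, POLL_TICKS, my_io_poll, sc);
}

static void
my_start_io(struct my_softc *sc)
{
	if (sc->irq_res != NULL) {
		my_enable_intr(sc);	/* interrupt-driven path */
	} else {
		callout_init(&sc->io_callout, CALLOUT_MPSAFE);
		callout_reset(&sc->io_callout, POLL_TICKS,
		    my_io_poll, sc);	/* polled path */
	}
}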
Example #6
static int
ata_siiprb_begin_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(device_get_parent(request->parent));
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_siiprb_command *prb;
    struct ata_siiprb_dma_prdentry *prd;
    int offset = ch->unit * 0x2000;
    u_int64_t prb_bus;

    /* SOS XXX */
    if (request->u.ata.command == ATA_DEVICE_RESET) {
        request->result = 0;
        return ATA_OP_FINISHED;
    }

    /* get a piece of the workspace for this request */
    prb = (struct ata_siiprb_command *)ch->dma.work;

    /* clear the prb structure */
    bzero(prb, sizeof(struct ata_siiprb_command));

    /* setup the FIS for this request */
    if (!ata_request2fis_h2d(request, &prb->fis[0])) {
        device_printf(request->parent, "setting up SATA FIS failed\n");
        request->result = EIO;
        return ATA_OP_FINISHED;
    }

    /* setup transfer type */
    if (request->flags & ATA_R_ATAPI) {
	bcopy(request->u.atapi.ccb, prb->u.atapi.ccb, 16);
	if (request->flags & ATA_R_ATAPI16)
	    ATA_OUTL(ctlr->r_res2, 0x1000 + offset, 0x00000020);
	else
	    ATA_OUTL(ctlr->r_res2, 0x1004 + offset, 0x00000020);
	if (request->flags & ATA_R_READ)
	    prb->control = htole16(0x0010);
	if (request->flags & ATA_R_WRITE)
	    prb->control = htole16(0x0020);
	prd = &prb->u.atapi.prd[0];
    }
    else
	prd = &prb->u.ata.prd[0];

    /* if request moves data setup and load SG list */
    if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {
	if (ch->dma.load(request, prd, NULL)) {
	    device_printf(request->parent, "setting up DMA failed\n");
	    request->result = EIO;
	    return ATA_OP_FINISHED;
	}
    }

    bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREWRITE);

    /* activate the prb */
    prb_bus = ch->dma.work_bus;
    ATA_OUTL(ctlr->r_res2, 0x1c00 + offset, prb_bus);
    ATA_OUTL(ctlr->r_res2, 0x1c04 + offset, prb_bus>>32);

    /* start the timeout */
    callout_reset(&request->callout, request->timeout * hz,
                  (timeout_t*)ata_timeout, request);
    return ATA_OP_CONTINUES;
}
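
In this driver request->timeout is in seconds, so the deadline passed to callout_reset() is timeout * hz ticks; the completion path (not shown here) must cancel the callout before retiring the request so ata_timeout() can never run against a finished command. A sketch of that arm/cancel pairing; my_request and the my_* helpers are hypothetical.

/*
 * Per-request timeout pairing (sketch).  my_request, my_start_hw,
 * my_timeout and my_finish are hypothetical.
 */
static void
my_begin(struct my_request *req)
{
	my_start_hw(req);
	/* req->timeout is in seconds; hz ticks elapse per second */
	callout_reset(&req->callout, req->timeout * hz, my_timeout, req);
}

static void
my_complete(struct my_request *req)
{
	/* cancel the pending timeout before retiring the request */
	callout_stop(&req->callout);
	my_finish(req);
}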
Example #7
File: aha.c  Project: ChristosKa/freebsd
static void
ahatimeout(void *arg)
{
	struct aha_ccb	*accb;
	union  ccb	*ccb;
	struct aha_softc *aha;
	uint32_t	paddr;
	struct ccb_hdr *ccb_h;

	accb = (struct aha_ccb *)arg;
	ccb = accb->ccb;
	aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;
	mtx_assert(&aha->lock, MA_OWNED);
	xpt_print_path(ccb->ccb_h.path);
	printf("CCB %p - timed out\n", (void *)accb);

	if ((accb->flags & ACCB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("CCB %p - timed out CCB already completed\n",
		    (void *)accb);
		return;
	}

	/*
	 * In order to simplify the recovery process, we ask the XPT
	 * layer to halt the queue of new transactions and we traverse
	 * the list of pending CCBs and remove their timeouts. This
	 * means that the driver attempts to clear only one error
	 * condition at a time.  In general, timeouts that occur
	 * close together are related anyway, so there is no benefit
	 * in attempting to handle errors in parallel.  Timeouts will
	 * be reinstated when the recovery process ends.
	 */
	if ((accb->flags & ACCB_DEVICE_RESET) == 0) {
		if ((accb->flags & ACCB_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(aha->sim, /*count*/1);
			accb->flags |= ACCB_RELEASE_SIMQ;
		}

		ccb_h = LIST_FIRST(&aha->pending_ccbs);
		while (ccb_h != NULL) {
			struct aha_ccb *pending_accb;

			pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
			callout_stop(&pending_accb->timer);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}
	}

	if ((accb->flags & ACCB_DEVICE_RESET) != 0
	 || aha->cur_outbox->action_code != AMBO_FREE) {
		/*
		 * Try a full host adapter/SCSI bus reset.
		 * We do this only if we have already attempted
		 * to clear the condition with a BDR, or we cannot
		 * attempt a BDR for lack of mailbox resources.
		 */
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		ahareset(aha, /*hardreset*/TRUE);
		device_printf(aha->dev, "No longer in timeout\n");
	} else {
		/*
		 * Send a Bus Device Reset message:
		 * The target that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths),
		 * but we have no way of determining this from our
		 * timeout handler.  Our strategy here is to queue a
		 * BDR message to the target of the timed out command.
		 * If this fails, we'll get another timeout 2 seconds
		 * later which will attempt a bus reset.
		 */
		accb->flags |= ACCB_DEVICE_RESET;
		callout_reset(&accb->timer, 2 * hz, ahatimeout, accb);
		aha->recovery_accb->hccb.opcode = INITIATOR_BUS_DEV_RESET;

		/* No Data Transfer */
		aha->recovery_accb->hccb.datain = TRUE;
		aha->recovery_accb->hccb.dataout = TRUE;
		aha->recovery_accb->hccb.ahastat = 0;
		aha->recovery_accb->hccb.sdstat = 0;
		aha->recovery_accb->hccb.target = ccb->ccb_h.target_id;

		/* Tell the adapter about this command */
		paddr = ahaccbvtop(aha, aha->recovery_accb);
		ahautoa24(paddr, aha->cur_outbox->ccb_addr);
		aha->cur_outbox->action_code = AMBO_START;
		aha_outb(aha, COMMAND_REG, AOP_START_MBOX);
		ahanextoutbox(aha);
	}
}
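
ahatimeout() escalates in two stages: the first firing queues a bus-device reset and re-arms itself with callout_reset(&accb->timer, 2 * hz, ...), so that if the BDR does not clear the condition the second firing takes the full host-adapter/bus reset branch (ACCB_DEVICE_RESET is set by then). The skeleton of that escalation, as a sketch with hypothetical my_* names.

/*
 * Two-stage timeout escalation (sketch).  my_cmd, MY_TRIED_RESET and
 * the my_* helpers are hypothetical.
 */
static void
my_timeout(void *arg)
{
	struct my_cmd *cmd = arg;

	if (cmd->flags & MY_TRIED_RESET) {
		/* stage 2: the targeted reset didn't help */
		my_full_bus_reset(cmd->sc);
		return;
	}
	cmd->flags |= MY_TRIED_RESET;
	my_queue_device_reset(cmd);	/* stage 1: targeted reset */
	/* if stage 1 fails, this same handler fires again in 2 s */
	callout_reset(&cmd->timer, 2 * hz, my_timeout, cmd);
}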
Example #8
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_CMP_ERR;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request.  Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_SCSI_IO_REQ);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        /* tws_freeze_simq(sc); */
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags = TWS_DIR_IN;
        else
            req->flags = TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_SCSI_IO_REQ;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    if (!(ccb_h->flags & CAM_DATA_PHYS)) {
         /* Virtual data addresses.  Need to convert them... */
         if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
             if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
                 TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
                 tws_release_request(req);
                 ccb_h->status = CAM_REQ_TOO_BIG;
                 xpt_done(ccb);
                 return(0);
             }

             req->length = csio->dxfer_len;
             if (req->length) {
                 req->data = csio->data_ptr;
                 /* there is one SG list entry */
                 /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
             }
         } else {
             TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
             tws_release_request(req);
             ccb_h->status = CAM_REQ_CMP_ERR;
             xpt_done(ccb);
             return(0);
         }
    } else {
         /* Data addresses are physical. */
         TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
         tws_release_request(req);
         ccb_h->status = CAM_REQ_CMP_ERR;
         ccb_h->status |= CAM_RELEASE_SIMQ;
         ccb_h->status &= ~CAM_SIM_QUEUED;
         xpt_done(ccb);
         return(0);
    }
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, tws_timeout,
	req);
    error = tws_map_request(sc, req);
    return(error);
}
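
CAM keeps CCB timeouts in milliseconds, hence the (ccb_h->timeout * hz) / 1000 conversion when arming the callout above. With integer math a sub-tick timeout truncates to zero, so a lower bound is a common defensive addition; the clamp in the sketch below is an assumption, not something this driver does.

/*
 * Millisecond-to-tick conversion (sketch).  The clamp to one tick is
 * an added assumption; the driver above passes the raw quotient.
 */
static int
ms_to_ticks(int ms)
{
	int t = (ms * hz) / 1000;

	return (t > 0 ? t : 1);		/* never arm a zero-tick timeout */
}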
Example #9
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;
	struct	cam_sim *sim;
        struct	adv_softc *adv;
	struct	adv_ccb_info *cinfo;
	struct	adv_scsi_q scsiq;
	struct	adv_sg_head sghead;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
                scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;	
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}


	crit_enter();
	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */             
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		crit_exit();
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		crit_exit();
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	callout_reset(ccb_h->timeout_ch, (ccb_h->timeout * hz) / 1000,
		      adv_timeout, csio);
	crit_exit();
}
Example #10
int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;
    struct cam_devq *devq;
    const char *res;
    char buf[64];
    int i, mode;

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
	return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    bzero(&ch->state_mtx, sizeof(struct mtx));
    mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
    TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
	for (i = 0; i < 16; i++) {
		ch->user[i].revision = 0;
		snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), buf, &mode) != 0 &&
		    resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "sata_rev", &mode) != 0)
			mode = -1;
		if (mode >= 0)
			ch->user[i].revision = mode;
		ch->user[i].mode = 0;
		snprintf(buf, sizeof(buf), "dev%d.mode", i);
		if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), buf, &res) == 0)
			mode = ata_str2mode(res);
		else if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), "mode", &res) == 0)
			mode = ata_str2mode(res);
		else
			mode = -1;
		if (mode >= 0)
			ch->user[i].mode = mode;
		if (ch->flags & ATA_SATA)
			ch->user[i].bytecount = 8192;
		else
			ch->user[i].bytecount = MAXPHYS;
		ch->user[i].caps = 0;
		ch->curr[i] = ch->user[i];
		if (ch->flags & ATA_SATA) {
			if (ch->pm_level > 0)
				ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
			if (ch->pm_level > 1)
				ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
		} else {
			if (!(ch->flags & ATA_NO_48BIT_DMA))
				ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48;
		}
	}
	callout_init(&ch->poll_callout, 1);

    /* allocate DMA resources if DMA HW present */
    if (ch->dma.alloc)
	ch->dma.alloc(dev);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				       RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
	device_printf(dev, "unable to allocate interrupt\n");
	return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
				ata_interrupt, ch, &ch->ih))) {
	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
	device_printf(dev, "unable to setup interrupt\n");
	return error;
    }

	if (ch->flags & ATA_PERIODIC_POLL)
		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	mtx_lock(&ch->state_mtx);
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(1);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate simq\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
	if (ch->sim == NULL) {
		device_printf(dev, "unable to allocate sim\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	mtx_unlock(&ch->state_mtx);
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	ch->sim = NULL;
err1:
	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);
	return (error);
}
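
Note the error path above: once the poll callout may have been armed, err1 calls callout_drain() rather than callout_stop(), because drain also waits for a handler that is already executing before the channel state can be torn down. The same ordering applies on detach; a sketch with hypothetical my_* names.

/*
 * Teardown ordering for a periodic callout (sketch).  my_softc,
 * my_detach and my_release_resources are hypothetical.
 */
static int
my_detach(struct my_softc *sc)
{
	/*
	 * callout_drain() cancels the callout and, unlike
	 * callout_stop(), also waits for an in-flight handler to
	 * return before we free what that handler dereferences.
	 */
	callout_drain(&sc->poll_callout);
	my_release_resources(sc);
	return (0);
}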
Example #11
File: tda.c  Project: ryo/netbsd-src
void
tda_attach(device_t parent, device_t self, void *aux)
{
	struct tda_softc *sc = device_private(self);
	struct i2c_attach_args *ia = aux;
	int rc;

	sc->sc_dev = self;
	sc->sc_tag = ia->ia_tag;
	sc->sc_addr = ia->ia_addr;

	aprint_normal(": %s\n", ia->ia_name);
	aprint_naive(": Environment sensor\n");

	/*
	 * Set the fans to a mid-range speed and save the power levels;
	 * the controller is write-only.
	 */
	sc->sc_cfan_speed = sc->sc_sfan_speed = (TDA_FANSPEED_MAX+TDA_FANSPEED_MIN)/2;
	tda_setspeed(sc);
	
	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_timer, hz*20, tda_timeout, sc);

	/* Initialise sensor data */
	sc->sc_sensor[SENSOR_FAN_CPU].state = ENVSYS_SINVALID;
	sc->sc_sensor[SENSOR_FAN_CPU].units = ENVSYS_INTEGER;
	sc->sc_sensor[SENSOR_FAN_CPU].flags = ENVSYS_FMONNOTSUPP;
	strlcpy(sc->sc_sensor[SENSOR_FAN_CPU].desc,
	    "fan.cpu",sizeof("fan.cpu"));
	sc->sc_sensor[SENSOR_FAN_SYS].state = ENVSYS_SINVALID;
	sc->sc_sensor[SENSOR_FAN_SYS].units = ENVSYS_INTEGER;
	sc->sc_sensor[SENSOR_FAN_SYS].flags = ENVSYS_FMONNOTSUPP;
	strlcpy(sc->sc_sensor[SENSOR_FAN_SYS].desc,
	    "fan.sys",sizeof("fan.sys"));
	sc->sc_sme = sysmon_envsys_create();
	rc = sysmon_envsys_sensor_attach(
	    sc->sc_sme, &sc->sc_sensor[SENSOR_FAN_CPU]);
	if (rc) {
		sysmon_envsys_destroy(sc->sc_sme);
		aprint_error_dev(self,
		    "unable to attach cpu fan at sysmon, error %d\n", rc);
		return;
	}
	rc = sysmon_envsys_sensor_attach(
	    sc->sc_sme, &sc->sc_sensor[SENSOR_FAN_SYS]);
	if (rc) {
		sysmon_envsys_destroy(sc->sc_sme);
		aprint_error_dev(self,
		    "unable to attach sys fan at sysmon, error %d\n", rc);
		return;
	}
	sc->sc_sme->sme_name = device_xname(self);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = tda_refresh;
	rc = sysmon_envsys_register(sc->sc_sme);
	if (rc) {
		aprint_error_dev(self,
		    "unable to register with sysmon, error %d\n", rc);
		sysmon_envsys_destroy(sc->sc_sme);
		return;
	}
}
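
tda_attach() arms sc_timer for hz*20 ticks (20 seconds at hz ticks per second); a handler such as tda_timeout, whose body is not shown here, typically does its periodic work and then re-arms the same callout to obtain a fixed-period timer. A generic sketch of that shape only; this is not the actual tda_timeout().

/*
 * Fixed-period refresh (sketch).  Generic shape only; not the real
 * tda_timeout(), whose body is not shown above.
 */
static void
my_refresh(void *arg)
{
	struct my_softc *sc = arg;

	my_read_sensors(sc);
	/* re-arm for the next period: 20 s at hz ticks per second */
	callout_reset(&sc->sc_timer, hz * 20, my_refresh, sc);
}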
Example #12
/*
 * Handle a radar detection event on a channel. The channel is
 * added to the NOL list and we record the time of the event.
 * Entries are aged out after NOL_TIMEOUT.  If radar was
 * detected while doing CAC we force a state/channel change.
 * Otherwise radar triggers a channel switch using the CSA
 * mechanism (when the channel is the bss channel).
 */
void
ieee80211_dfs_notify_radar(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
	struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
	int i, now;

	IEEE80211_LOCK_ASSERT(ic);

	/*
	 * If doing DFS debugging (mode 2), don't bother
	 * running the rest of this function.
	 *
	 * Simply announce the presence of the radar and continue
	 * along merrily.
	 */
	if (ieee80211_dfs_debug == DFS_DBG_NOCSANOL) {
		announce_radar(ic, chan, chan);
		ieee80211_notify_radar(ic, chan);
		return;
	}

	/*
	 * Don't mark the channel and don't put it into NOL
	 * if we're doing DFS debugging.
	 */
	if (ieee80211_dfs_debug == DFS_DBG_NONE) {
		/*
		 * Mark all entries with this frequency.  Notify user
		 * space and arrange for notification when the radar
		 * indication is cleared.  Then kick the NOL processing
		 * thread if not already running.
		 */
		now = ticks;
		for (i = 0; i < ic->ic_nchans; i++) {
			struct ieee80211_channel *c = &ic->ic_channels[i];
			if (c->ic_freq == chan->ic_freq) {
				c->ic_state &= ~IEEE80211_CHANSTATE_CACDONE;
				c->ic_state |= IEEE80211_CHANSTATE_RADAR;
				dfs->nol_event[i] = now;
			}
		}
		ieee80211_notify_radar(ic, chan);
		chan->ic_state |= IEEE80211_CHANSTATE_NORADAR;
		if (!callout_pending(&dfs->nol_timer))
			callout_reset(&dfs->nol_timer, NOL_TIMEOUT,
			    dfs_timeout, ic);
	}

	/*
	 * If radar is detected on the bss channel while
	 * doing CAC; force a state change by scheduling the
	 * callout to be dispatched asap.  Otherwise, if this
	 * event is for the bss channel then we must quiet
	 * traffic and schedule a channel switch.
	 *
	 * Note this allows us to receive notification about
	 * channels other than the bss channel; not sure
	 * that can/will happen but it's simple to support.
	 */
	if (chan == ic->ic_bsschan) {
		/* XXX need a way to defer to user app */

		/*
		 * Don't flip over to a new channel if
		 * we are currently doing DFS debugging.
		 */
		if (ieee80211_dfs_debug == DFS_DBG_NONE)
			dfs->newchan = ieee80211_dfs_pickchannel(ic);
		else
			dfs->newchan = chan;

		announce_radar(ic, chan, dfs->newchan);

		if (callout_pending(&dfs->cac_timer))
			callout_schedule(&dfs->cac_timer, 0);
		else if (dfs->newchan != NULL) {
			/* XXX mode 1, switch count 2 */
			/* XXX calculate switch count based on max
			  switch time and beacon interval? */
			ieee80211_csa_startswitch(ic, dfs->newchan, 1, 2);
		} else {
			/*
			 * Spec says to stop all transmissions and
			 * wait on the current channel for an entry
			 * on the NOL to expire.
			 */
			/*XXX*/
			ic_printf(ic, "%s: No free channels; waiting for entry "
			    "on NOL to expire\n", __func__);
		}
	} else {
		/*
		 * Issue rate-limited console msgs.
		 */
		if (dfs->lastchan != chan) {
			dfs->lastchan = chan;
			dfs->cureps = 0;
			announce_radar(ic, chan, NULL);
		} else if (ppsratecheck(&dfs->lastevent, &dfs->cureps, 1)) {
			announce_radar(ic, chan, NULL);
		}
	}
}
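
The NOL arming above is guarded by callout_pending(): radar events can arrive in bursts, and re-arming on every event would keep pushing the NOL expiry further out, so the timer is started only if it is not already ticking. The idiom in isolation, as a sketch with hypothetical my_* names.

/*
 * Arm-once guard (sketch).  my_state, my_timer_fn and
 * MY_TIMEOUT_TICKS are hypothetical.
 */
static void
my_note_event(struct my_state *st)
{
	st->last_event = ticks;
	/* keep the original deadline: arm only if not already pending */
	if (!callout_pending(&st->timer))
		callout_reset(&st->timer, MY_TIMEOUT_TICKS,
		    my_timer_fn, st);
}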
Example #13
File: ethernet.c  Project: JabirTech/Source
/**
 * Module/driver initialization.  Creates the network
 * devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;
			
			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, CALLOUT_MPSAFE);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
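The tail of this example is the canonical arm-at-attach pattern: callout_init() once, then callout_reset() to start a poll loop that re-arms itself from its own handler. A minimal sketch of that periodic pattern, with illustrative names (the demo_* identifiers are not from the driver):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static struct callout demo_timer;

/* Hypothetical periodic handler: do the work, then re-arm one second out. */
static void
demo_tick(void *arg)
{
	/* ... poll hardware, update link state, etc. ... */
	callout_reset(&demo_timer, hz, demo_tick, arg);
}

static void
demo_start(void)
{
	callout_init(&demo_timer, CALLOUT_MPSAFE);	/* handler runs without Giant */
	callout_reset(&demo_timer, hz, demo_tick, NULL);
}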
Example #14
/*
 * Handle a radar detection event on a channel. The channel is
 * added to the NOL list and we record the time of the event.
 * Entries are aged out after NOL_TIMEOUT.  If radar was
 * detected while doing CAC we force a state/channel change.
 * Otherwise radar triggers a channel switch using the CSA
 * mechanism (when the channel is the bss channel).
 */
void
ieee80211_dfs_notify_radar(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
	struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
	int i, now;

	/*
	 * Mark all entries with this frequency.  Notify user
	 * space and arrange for notification when the radar
	 * indication is cleared.  Then kick the NOL processing
	 * thread if not already running.
	 */
	now = ticks;
	for (i = 0; i < ic->ic_nchans; i++) {
		struct ieee80211_channel *c = &ic->ic_channels[i];
		if (c->ic_freq == chan->ic_freq) {
			c->ic_state &= ~IEEE80211_CHANSTATE_CACDONE;
			c->ic_state |= IEEE80211_CHANSTATE_RADAR;
			dfs->nol_event[i] = now;
		}
	}
	ieee80211_notify_radar(ic, chan);
	chan->ic_state &= ~IEEE80211_CHANSTATE_NORADAR;
	if (!callout_pending(&dfs->nol_timer)) {
		callout_reset(&dfs->nol_timer, NOL_TIMEOUT,
				dfs_timeout_callout, ic);
	}

	/*
	 * If radar is detected on the bss channel while
	 * doing CAC; force a state change by scheduling the
	 * callout to be dispatched asap.  Otherwise, if this
	 * event is for the bss channel then we must quiet
	 * traffic and schedule a channel switch.
	 *
	 * Note this allows us to receive notification about
	 * channels other than the bss channel; not sure
	 * that can/will happen but it's simple to support.
	 */
	if (chan == ic->ic_bsschan) {
		/* XXX need a way to defer to user app */
		dfs->newchan = ieee80211_dfs_pickchannel(ic);

		announce_radar(ic->ic_ifp, chan, dfs->newchan);

#ifdef notyet
		if (callout_pending(&dfs->cac_timer)) {
			callout_reset(&dfs->cac_timer, 0,
					cac_timeout_callout, vap);
		}
		else if (dfs->newchan != NULL) {
			/* XXX mode 1, switch count 2 */
			/* XXX calculate switch count based on max
			  switch time and beacon interval? */
			ieee80211_csa_startswitch(ic, dfs->newchan, 1, 2);
		} else {
			/*
			 * Spec says to stop all transmissions and
			 * wait on the current channel for an entry
			 * on the NOL to expire.
			 */
			/*XXX*/
		}
#endif
	} else {
		/*
		 * Issue rate-limited console msgs.
		 */
		if (dfs->lastchan != chan) {
			dfs->lastchan = chan;
			dfs->cureps = 0;
			announce_radar(ic->ic_ifp, chan, NULL);
		} else if (ppsratecheck(&dfs->lastevent, &dfs->cureps, 1)) {
			announce_radar(ic->ic_ifp, chan, NULL);
		}
	}
}
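Note the guarded arm of the NOL ager above: callout_reset() is issued only when callout_pending() is false, so a burst of radar events does not keep pushing the one-shot expiry further out. The idiom, as a sketch with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

/* Arm a one-shot ager only if no run is already scheduled. */
static void
ager_schedule(struct callout *c, int timeout_ticks,
    void (*fn)(void *), void *arg)
{
	if (!callout_pending(c))
		callout_reset(c, timeout_ticks, fn, arg);
}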
Example #15
void
tcp_timer_2msl(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	tcp_free_sackholes(tp);
	if (callout_pending(&tp->t_timers->tt_2msl) ||
	    !callout_active(&tp->t_timers->tt_2msl)) {
		INP_WUNLOCK(tp->t_inpcb);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_2msl);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
		("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long delete connection control block.  Otherwise, check
	 * again in a bit.
	 *
	 * If in TIME_WAIT state just ignore as this timeout is handled in
	 * tcp_tw_2msl_scan().
	 *
	 * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed, 
	 * there's no point in hanging onto FIN_WAIT_2 socket. Just close it. 
	 * Ignore fact that there were recent incoming segments.
	 */
	if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
	    tp->t_inpcb && tp->t_inpcb->inp_socket && 
	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_close(tp);             
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_2msl,
				      TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
		} else {
			if (tcp_inpinfo_lock_add(inp)) {
				tcp_inpinfo_lock_del(inp, tp);
				goto out;
			}
			tp = tcp_close(tp);
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
	}

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

	if (tp != NULL)
		INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}
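The prologue of this handler is the standard FreeBSD callout race check: once the lock is held, a firing that was rescheduled in the meantime (still pending) or stopped (no longer active) must return without doing anything, and a legitimate firing claims itself with callout_deactivate(). A self-contained sketch of the idiom, assuming a hypothetical softc:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct demo_softc {
	struct mtx	mtx;
	struct callout	timer;
};

static void
demo_timer_fn(void *arg)
{
	struct demo_softc *sc = arg;

	mtx_lock(&sc->mtx);
	if (callout_pending(&sc->timer) || !callout_active(&sc->timer)) {
		/* Rearmed or cancelled while we waited for the lock. */
		mtx_unlock(&sc->mtx);
		return;
	}
	callout_deactivate(&sc->timer);	/* claim this firing */
	/* ... timer work, possibly callout_reset() to rearm ... */
	mtx_unlock(&sc->mtx);
}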
Example #16
void
adv_timeout(void *arg)
{
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	kprintf("Timed out\n");

	crit_enter();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		crit_exit();
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time.  In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel.  Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			callout_stop(ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		kprintf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		callout_reset(ccb->ccb_h.timeout_ch, 2 * hz, adv_timeout, ccb);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		kprintf("Resetting bus\n");		
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
	crit_exit();
}
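This timeout handler escalates rather than failing outright: the first firing queues an abort and re-arms the same handler two seconds out, and only a second firing resets the bus. A compact sketch of the two-stage pattern, with illustrative types and names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct demo_req {
	struct callout	timer;
	int		abort_queued;	/* like ACCB_ABORT_QUEUED above */
};

static void
demo_timeout(void *arg)
{
	struct demo_req *req = arg;

	if (!req->abort_queued) {
		req->abort_queued = 1;
		/* ... issue an abort, then give it two seconds to stick ... */
		callout_reset(&req->timer, 2 * hz, demo_timeout, req);
	} else {
		/* ... abort did not help: escalate to a bus reset ... */
	}
}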
Example #17
void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
		("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			tcp_respond(tp, t_template->tt_ipgen,
				    &t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt, tp->snd_una - 1, 0);
			free(t_template, M_TEMP);
		}
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
			      tcp_timer_keep, tp);
	} else
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
			      tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return;

dropit:
	TCPSTAT_INC(tcps_keepdrops);

	if (tcp_inpinfo_lock_add(inp)) {
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	tcp_inpinfo_lock_del(inp, tp);
out:
	CURVNET_RESTORE();
}
Example #18
void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 *  exposed to the kernel.  It is our responsibility to use this method
	 *  to get the SMP device that contains the specified end device.  If
	 *  the device is direct-attached, the handle will come back NULL, and
	 *  we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS) scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout,
		    isci_io_request_timeout, request);
}
Example #19
File: aha.c Project: ChristosKa/freebsd
static void
ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code)
{
	union  ccb	  *ccb;
	struct ccb_scsiio *csio;

	ccb = accb->ccb;
	csio = &accb->ccb->csio;

	if ((accb->flags & ACCB_ACTIVE) == 0) {
		device_printf(aha->dev, 
		    "ahadone - Attempt to free non-active ACCB %p\n",
		    (void *)accb);
		return;
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);
		bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
	}

	if (accb == aha->recovery_accb) {
		/*
		 * The recovery ACCB does not have a CCB associated
		 * with it, so short circuit the normal error handling.
		 * We now traverse our list of pending CCBs and process
		 * any that were terminated by the recovery CCBs action.
		 * We also reinstate timeouts for all remaining, pending,
		 * CCBs.
		 */
		struct cam_path *path;
		struct ccb_hdr *ccb_h;
		cam_status error;

		/* Notify all clients that a BDR occurred */
		error = xpt_create_path(&path, /*periph*/NULL,
		    cam_sim_path(aha->sim), accb->hccb.target,
		    CAM_LUN_WILDCARD);

		if (error == CAM_REQ_CMP) {
			xpt_async(AC_SENT_BDR, path, NULL);
			xpt_free_path(path);
		}

		ccb_h = LIST_FIRST(&aha->pending_ccbs);
		while (ccb_h != NULL) {
			struct aha_ccb *pending_accb;

			pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
			if (pending_accb->hccb.target == accb->hccb.target) {
				pending_accb->hccb.ahastat = AHASTAT_HA_BDR;
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
				ahadone(aha, pending_accb, AMBI_ERROR);
			} else {
				callout_reset(&pending_accb->timer,
				    (ccb_h->timeout * hz) / 1000,
				    ahatimeout, pending_accb);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
		}
		device_printf(aha->dev, "No longer in timeout\n");
		return;
	}

	callout_stop(&accb->timer);

	switch (comp_code) {
	case AMBI_FREE:
		device_printf(aha->dev,
		    "ahadone - CCB completed with free status!\n");
		break;
	case AMBI_NOT_FOUND:
		device_printf(aha->dev,
		    "ahadone - CCB Abort failed to find CCB\n");
		break;
	case AMBI_ABORT:
	case AMBI_ERROR:
		/* An error occurred */
		if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
			csio->resid = 0;
		else
			csio->resid = aha_a24tou(accb->hccb.data_len);
		switch(accb->hccb.ahastat) {
		case AHASTAT_DATARUN_ERROR:
		{
			if (csio->resid <= 0) {
				csio->ccb_h.status = CAM_DATA_RUN_ERR;
				break;
			}
			/* FALLTHROUGH */
		}
		case AHASTAT_NOERROR:
			csio->scsi_status = accb->hccb.sdstat;
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			switch(csio->scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				csio->ccb_h.status |= CAM_AUTOSNS_VALID;
				/*
				 * The aha writes the sense data at different
				 * offsets based on the scsi cmd len
				 */
				bcopy((caddr_t) &accb->hccb.scsi_cdb +
				    accb->hccb.cmd_len,
				    (caddr_t) &csio->sense_data,
				    accb->hccb.sense_len);
				break;
			default:
				break;
			case SCSI_STATUS_OK:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			break;
		case AHASTAT_SELTIMEOUT:
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case AHASTAT_UNEXPECTED_BUSFREE:
			csio->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case AHASTAT_INVALID_PHASE:
			csio->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case AHASTAT_INVALID_ACTION_CODE:
			panic("%s: Inavlid Action code", aha_name(aha));
			break;
		case AHASTAT_INVALID_OPCODE:
			if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
				panic("%s: Invalid CCB Opcode %x hccb = %p",
				    aha_name(aha), accb->hccb.opcode,
				    &accb->hccb);
			device_printf(aha->dev,
			    "AHA-1540A compensation failed\n");
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			csio->ccb_h.status = CAM_REQUEUE_REQ;
			break;
		case AHASTAT_LINKED_CCB_LUN_MISMATCH:
			/* We don't even support linked commands... */
			panic("%s: Linked CCB Lun Mismatch", aha_name(aha));
			break;
		case AHASTAT_INVALID_CCB_OR_SG_PARAM:
			panic("%s: Invalid CCB or SG list", aha_name(aha));
			break;
		case AHASTAT_HA_SCSI_BUS_RESET:
			if ((csio->ccb_h.status & CAM_STATUS_MASK)
			    != CAM_CMD_TIMEOUT)
				csio->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case AHASTAT_HA_BDR:
			if ((accb->flags & ACCB_DEVICE_RESET) == 0)
				csio->ccb_h.status = CAM_BDR_SENT;
			else
				csio->ccb_h.status = CAM_CMD_TIMEOUT;
			break;
		}
		if (csio->ccb_h.status != CAM_REQ_CMP) {
			xpt_freeze_devq(csio->ccb_h.path, /*count*/1);
			csio->ccb_h.status |= CAM_DEV_QFRZN;
		}
		if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		break;
	case AMBI_OK:
		/* All completed without incident */
		/* XXX DO WE NEED TO COPY SENSE BYTES HERE???? XXX */
		/* I don't think so since it works???? */
		ccb->ccb_h.status |= CAM_REQ_CMP;
		if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		break;
	}
}
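On the completion side the ordering matters: the per-request watchdog is stopped with callout_stop() before the CCB is freed, so the timeout handler cannot fire against a recycled request. A sketch, with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct demo_req {
	struct callout	timer;
	/* ... request state ... */
};

static void
demo_done(struct demo_req *req)
{
	/* Cancel the watchdog first; a nonzero return means it was still
	 * pending, i.e. completion won the race with the timeout. */
	(void)callout_stop(&req->timer);
	/* ... release the request and complete the CCB ... */
}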
Example #20
/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	mtx_lock(&blitq->blit_lock);

	done_transfer = blitq->is_active &&
	  (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(&blitq->blit_queue[cur]);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;

		taskqueue_enqueue(taskqueue_swi, &blitq->wq);

	} else if (blitq->is_active && (ticks >= blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = ticks + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = ticks + DRM_HZ;

			if (!callout_pending(&blitq->poll_timer))
				callout_reset(&blitq->poll_timer,
				    1, (timeout_t *)via_dmablit_timer,
				    (void *)blitq);
		} else {
			if (callout_pending(&blitq->poll_timer)) {
				callout_stop(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	mtx_unlock(&blitq->blit_lock);
}
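The tail of the handler keeps a one-tick poll timer alive only while a blit is in flight and stops it once the engine idles. A sketch of that conditional polling, with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static void
poll_update(struct callout *poll, int engine_active,
    void (*fn)(void *), void *arg)
{
	if (engine_active) {
		if (!callout_pending(poll))
			callout_reset(poll, 1, fn, arg);	/* next tick */
	} else if (callout_pending(poll))
		callout_stop(poll);
}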
Example #21
File: aha.c Project: ChristosKa/freebsd
static void
ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 aha_ccb *accb;
	union	 ccb *ccb;
	struct	 aha_softc *aha;
	uint32_t paddr;

	accb = (struct aha_ccb *)arg;
	ccb = accb->ccb;
	aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;

	if (error != 0) {
		if (error != EFBIG)
			device_printf(aha->dev,
			    "Unexepected error 0x%x returned from "
			    "bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		aha_sg_t *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		sg = accb->sg_list;
		while (dm_segs < end_seg) {
			ahautoa24(dm_segs->ds_len, sg->len);
			ahautoa24(dm_segs->ds_addr, sg->addr);
			sg++;
			dm_segs++;
		}

		if (nseg > 1) {
			accb->hccb.opcode = aha->ccb_sg_opcode;
			ahautoa24((sizeof(aha_sg_t) * nseg),
			    accb->hccb.data_len);
			ahautoa24(accb->sg_list_phys, accb->hccb.data_addr);
		} else {
			bcopy(accb->sg_list->len, accb->hccb.data_len, 3);
			bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3);
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);

	} else {
		accb->hccb.opcode = INITIATOR_CCB;
		ahautoa24(0, accb->hccb.data_len);
		ahautoa24(0, accb->hccb.data_addr);
	}

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		return;
	}

	accb->flags = ACCB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le);

	callout_reset(&accb->timer, (ccb->ccb_h.timeout * hz) / 1000,
	    ahatimeout, accb);

	/* Tell the adapter about this command */
	if (aha->cur_outbox->action_code != AMBO_FREE) {
		/*
		 * We should never encounter a busy mailbox.
		 * If we do, warn the user, and treat it as
		 * a resource shortage.  If the controller is
		 * hung, one of the pending transactions will
		 * timeout causing us to start recovery operations.
		 */
		device_printf(aha->dev,
		    "Encountered busy mailbox with %d out of %d "
		    "commands active!!!", aha->active_ccbs, aha->max_ccbs);
		callout_stop(&accb->timer);
		if (nseg != 0)
			bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
		ahafreeccb(aha, accb);
		aha->resource_shortage = TRUE;
		xpt_freeze_simq(aha->sim, /*count*/1);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}
	paddr = ahaccbvtop(aha, accb);
	ahautoa24(paddr, aha->cur_outbox->ccb_addr);
	aha->cur_outbox->action_code = AMBO_START;
	aha_outb(aha, COMMAND_REG, AOP_START_MBOX);

	ahanextoutbox(aha);
}
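CAM carries CCB timeouts in milliseconds while callout_reset() takes ticks, hence the (ccb->ccb_h.timeout * hz) / 1000 conversion above. A one-line helper makes the unit change explicit:

#include <sys/param.h>
#include <sys/kernel.h>

/* Convert a millisecond budget to ticks; hz is ticks per second. */
static int
ms_to_ticks(int ms)
{
	return ((ms * hz) / 1000);
}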
Example #22
/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
	sc->sim_lock = &(sc->sim_lock_handle);
	mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);

	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	pci_enable_busmaster(dev);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&(sc->reg_res_id), RF_ACTIVE))
				== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ,
				&(sc->irq_res_id),
				RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
			UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
			"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	sc->watchdog_index = 0;
	callout_init(&(sc->watchdog_callout[0]), 1);
	callout_init(&(sc->watchdog_callout[1]), 1);
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}
Example #23
int
wdog_kern_pat(u_int utim)
{
	int error;

	if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
		return (EINVAL);

	if ((utim & WD_LASTVAL) != 0) {
		/*
		 * if WD_LASTVAL is set, fill in the bits for timeout
		 * from the saved value in wd_last_u.
		 */
		MPASS((wd_last_u & ~WD_INTERVAL) == 0);
		utim &= ~WD_LASTVAL;
		utim |= wd_last_u;
	} else {
		/*
		 * Otherwise save the new interval.
		 * This can be zero (to disable the watchdog)
		 */
		wd_last_u = (utim & WD_INTERVAL);
		wd_last_u_sysctl = wd_last_u;
		wd_last_u_sysctl_secs = pow2ns_to_ticks(wd_last_u) / hz;
	}
	if ((utim & WD_INTERVAL) == WD_TO_NEVER) {
		utim = 0;

		/* Assume all is well; watchdog signals failure. */
		error = 0;
	} else {
		/* Assume no watchdog available; watchdog flags success */
		error = EOPNOTSUPP;
	}
	if (wd_softtimer) {
		if (utim == 0) {
			callout_stop(&wd_softtimeo_handle);
		} else {
			(void) callout_reset(&wd_softtimeo_handle,
			    pow2ns_to_ticks(utim), wd_timeout_cb, "soft");
		}
		error = 0;
	} else {
		EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
	}
	wd_set_pretimeout(wd_pretimeout, true);
	/*
	 * If we were able to arm/strobe the watchdog, then
	 * update the last time it was strobed for WDIOC_GETTIMELEFT
	 */
	if (!error) {
		struct timespec ts;

		error = kern_clock_gettime(curthread /* XXX */,
		    CLOCK_MONOTONIC_FAST, &ts);
		if (!error) {
			wd_lastpat = ts.tv_sec;
			wd_lastpat_valid = 1;
		}
	}
	return (error);
}
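In the soft-timer branch above, a zero interval (the WD_TO_NEVER case, after conversion) disarms the callout and anything else re-arms it. The arm/disarm split, sketched with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static void
soft_wd_pat(struct callout *c, int timeout_ticks,
    void (*cb)(void *), void *arg)
{
	if (timeout_ticks == 0)
		callout_stop(c);		/* watchdog disabled */
	else
		(void)callout_reset(c, timeout_ticks, cb, arg);
}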
Example #24
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef    TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef    TW_OSL_DEBUG
	if (i_need_a_reset || my_watchdog_was_pending)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
		"driver_is_active = %d, my_watchdog_was_pending = %d\n",
		i_need_a_reset, driver_is_active, my_watchdog_was_pending);
#endif /* TW_OSL_DEBUG */
}
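A detail this function relies on: callout_reset() returns one when it rescheduled a callout that was still pending and zero otherwise, which is what the debug block records in my_watchdog_was_pending. A normal firing that re-arms itself should see zero, since the shot that invoked it has already been dispatched.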
Example #25
File: ntb_hw.c Project: Alkzndr/freebsd
static int
ntb_setup_soc(struct ntb_softc *ntb)
{
	uint32_t val, connection_type;

	val = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);

	connection_type = (val & SOC_PPD_CONN_TYPE) >> 8;
	switch (connection_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = NTB_CONN_B2B;
		break;
	case NTB_CONN_RP:
	default:
		device_printf(ntb->device, "Connection type %d not supported\n",
		    connection_type);
		return (ENXIO);
	}

	if ((val & SOC_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	/* Initiate PCI-E link training */
	pci_write_config(ntb->device, NTB_PPD_OFFSET, val | SOC_PPD_INIT_LINK,
	    4);

	ntb->reg_ofs.pdb	 = SOC_PDOORBELL_OFFSET;
	ntb->reg_ofs.pdb_mask	 = SOC_PDBMSK_OFFSET;
	ntb->reg_ofs.sbar2_xlat  = SOC_SBAR2XLAT_OFFSET;
	ntb->reg_ofs.sbar4_xlat  = SOC_SBAR4XLAT_OFFSET;
	ntb->reg_ofs.lnk_cntl	 = SOC_NTBCNTL_OFFSET;
	ntb->reg_ofs.lnk_stat	 = SOC_LINK_STATUS_OFFSET;
	ntb->reg_ofs.spad_local	 = SOC_SPAD_OFFSET;
	ntb->reg_ofs.spci_cmd	 = SOC_PCICMD_OFFSET;

	if (ntb->conn_type == NTB_CONN_B2B) {
		ntb->reg_ofs.sdb	 = SOC_B2B_DOORBELL_OFFSET;
		ntb->reg_ofs.spad_remote = SOC_B2B_SPAD_OFFSET;
		ntb->limits.max_spads	 = SOC_MAX_SPADS;
	} else {
		ntb->reg_ofs.sdb	 = SOC_PDOORBELL_OFFSET;
		ntb->reg_ofs.spad_remote = SOC_SPAD_OFFSET;
		ntb->limits.max_spads	 = SOC_MAX_COMPAT_SPADS;
	}

	ntb->limits.max_db_bits  = SOC_MAX_DB_BITS;
	ntb->limits.msix_cnt	 = SOC_MSIX_CNT;
	ntb->bits_per_vector	 = SOC_DB_BITS_PER_VEC;

	/*
	 * FIXME - MSI-X bug on early SOC HW, remove once internal issue is
	 * resolved.  Mask transaction layer internal parity errors.
	 */
	pci_write_config(ntb->device, 0xFC, 0x4, 4);

	configure_soc_secondary_side_bars(ntb);

	/* Enable Bus Master and Memory Space on the secondary side */
	ntb_reg_write(2, ntb->reg_ofs.spci_cmd,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
	callout_reset(&ntb->heartbeat_timer, 0, ntb_handle_heartbeat, ntb);

	return (0);
}
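The heartbeat is started with a tick count of zero; the callout code clamps non-positive tick counts to one, so the first run of ntb_handle_heartbeat happens on the next softclock pass rather than synchronously here.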
Example #26
void
sdp_post_sends(struct sdp_sock *ssk, int wait)
{
	struct mbuf *mb;
	int post_count = 0;
	struct socket *sk;
	int low;

	sk = ssk->socket;
	if (unlikely(!ssk->id)) {
		if (sk->so_snd.sb_sndptr) {
			sdp_dbg(ssk->socket,
				"Send on socket without cmid ECONNRESET.\n");
			sdp_notify(ssk, ECONNRESET);
		}
		return;
	}
again:
	if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
		sdp_xmit_poll(ssk,  1);

	if (ssk->recv_request &&
	    ring_tail(ssk->rx_ring) >= ssk->recv_request_head &&
	    tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk)) {
		mb = sdp_alloc_mb_chrcvbuf_ack(sk,
		    ssk->recv_bytes - SDP_HEAD_SIZE, wait);
		if (mb == NULL)
			goto allocfail;
		ssk->recv_request = 0;
		sdp_post_send(ssk, mb);
		post_count++;
	}

	if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) && sk->so_snd.sb_sndptr &&
	    sdp_nagle_off(ssk, sk->so_snd.sb_sndptr)) {
		SDPSTATS_COUNTER_INC(send_miss_no_credits);
	}

	while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) && (mb = sk->so_snd.sb_sndptr) &&
	    sdp_nagle_off(ssk, mb)) {
		struct mbuf *n;

		SOCKBUF_LOCK(&sk->so_snd);
		sk->so_snd.sb_sndptr = mb->m_nextpkt;
		sk->so_snd.sb_mb = mb->m_nextpkt;
		mb->m_nextpkt = NULL;
		SB_EMPTY_FIXUP(&sk->so_snd);
		for (n = mb; n != NULL; n = n->m_next)
			sbfree(&sk->so_snd, n);
		SOCKBUF_UNLOCK(&sk->so_snd);
		sdp_post_send(ssk, mb);
		post_count++;
	}

	if (credit_update_needed(ssk) && ssk->state >= TCPS_ESTABLISHED &&
	    ssk->state < TCPS_FIN_WAIT_2) {
		mb = sdp_alloc_mb_data(ssk->socket, wait);
		if (mb == NULL)
			goto allocfail;
		sdp_post_send(ssk, mb);

		SDPSTATS_COUNTER_INC(post_send_credits);
		post_count++;
	}

	/* send DisConn if needed
	 * Do not send DisConn if there is only 1 credit. Compliance with CA4-82
	 * If one credit is available, an implementation shall only send SDP
	 * messages that provide additional credits and also do not contain ULP
	 * payload. */
	if ((ssk->flags & SDP_NEEDFIN) && !sk->so_snd.sb_sndptr &&
	    tx_credits(ssk) > 1) {
		mb = sdp_alloc_mb_disconnect(sk, wait);
		if (mb == NULL)
			goto allocfail;
		ssk->flags &= ~SDP_NEEDFIN;
		sdp_post_send(ssk, mb);
		post_count++;
	}
	low = (sdp_tx_ring_slots_left(ssk) <= SDP_MIN_TX_CREDITS);
	if (post_count || low) {
		if (low)
			sdp_arm_tx_cq(ssk);
		if (sdp_xmit_poll(ssk, low))
			goto again;
	}
	return;

allocfail:
	ssk->nagle_last_unacked = -1;
	callout_reset(&ssk->nagle_timer, 1, sdp_nagle_timeout, ssk);
	return;
}
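The allocfail path is a deliberate backoff: instead of blocking the send path on mbuf exhaustion, it clears the Nagle bookkeeping and arms the nagle timer one tick out, so the deferred post is retried from sdp_nagle_timeout.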
Example #27
File: com.c Project: lacombar/netbsd-alc
int
comopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct com_softc *sc;
	int iobase;
	struct tty *tp;
	int s;
	int error = 0;

	sc = device_lookup_private(&xcom_cd, COMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!sc->sc_tty) {
		tp = sc->sc_tty = ttymalloc();
		tty_attach(tp);
	} else
		tp = sc->sc_tty;

	tp->t_oproc = comstart;
	tp->t_param = comparam;
	tp->t_dev = dev;

	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	s = spltty();

	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		ttychars(tp);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_cflag = TTYDEF_CFLAG;
		if (ISSET(sc->sc_swflags, COM_SW_CLOCAL))
			SET(tp->t_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, COM_SW_CRTSCTS))
			SET(tp->t_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, COM_SW_MDMBUF))
			SET(tp->t_cflag, MDMBUF);
		tp->t_lflag = TTYDEF_LFLAG;
		tp->t_ispeed = tp->t_ospeed = comdefaultrate;

		comparam(tp, &tp->t_termios);
		ttsetwater(tp);

		if (comsopen++ == 0)
			callout_reset(&com_poll_ch, 1, compollin, NULL);

		sc->sc_ibufp = sc->sc_ibuf = sc->sc_ibufs[0];
		sc->sc_ibufhigh = sc->sc_ibuf + COM_IHIGHWATER;
		sc->sc_ibufend = sc->sc_ibuf + COM_IBUFSIZE;

		iobase = sc->sc_iobase;
#ifdef COM_HAYESP
		/* Setup the ESP board */
		if (ISSET(sc->sc_hwflags, COM_HW_HAYESP)) {
			int hayespbase = sc->sc_hayespbase;

			outb(iobase + com_fifo,
			     FIFO_DMA_MODE|FIFO_ENABLE|
			     FIFO_RCV_RST|FIFO_XMT_RST|FIFO_TRIGGER_8);

			/* Set 16550 compatibility mode */
			outb(hayespbase + HAYESP_CMD1, HAYESP_SETMODE);
			outb(hayespbase + HAYESP_CMD2,
			     HAYESP_MODE_FIFO|HAYESP_MODE_RTS|
			     HAYESP_MODE_SCALE);

			/* Set RTS/CTS flow control */
			outb(hayespbase + HAYESP_CMD1, HAYESP_SETFLOWTYPE);
			outb(hayespbase + HAYESP_CMD2, HAYESP_FLOW_RTS);
			outb(hayespbase + HAYESP_CMD2, HAYESP_FLOW_CTS);

			/* Set flow control levels */
			outb(hayespbase + HAYESP_CMD1, HAYESP_SETRXFLOW);
			outb(hayespbase + HAYESP_CMD2,
			     HAYESP_HIBYTE(HAYESP_RXHIWMARK));
			outb(hayespbase + HAYESP_CMD2,
			     HAYESP_LOBYTE(HAYESP_RXHIWMARK));
			outb(hayespbase + HAYESP_CMD2,
			     HAYESP_HIBYTE(HAYESP_RXLOWMARK));
			outb(hayespbase + HAYESP_CMD2,
			     HAYESP_LOBYTE(HAYESP_RXLOWMARK));
		} else
#endif
		if (ISSET(sc->sc_hwflags, COM_HW_FIFO))
			/* Set the FIFO threshold based on the receive speed. */
			outb(pio(iobase, com_fifo),
			    FIFO_ENABLE | FIFO_RCV_RST | FIFO_XMT_RST |
			    (tp->t_ispeed <= 1200 ? FIFO_TRIGGER_1 : FIFO_TRIGGER_8));
		/* flush any pending I/O */
		while (ISSET(inb(pio(iobase, com_lsr)), LSR_RXRDY))
			(void) inb(pio(iobase, com_data));
		/* you turn me on, baby */
		sc->sc_mcr = MCR_DTR | MCR_RTS;
		if (!ISSET(sc->sc_hwflags, COM_HW_NOIEN))
			SET(sc->sc_mcr, MCR_IENABLE | MCR_DRS);
		outb(pio(iobase, com_mcr), sc->sc_mcr);
		sc->sc_ier = IER_ERXRDY | IER_ERLS | IER_EMSC;
		outb(pio(iobase, com_ier), sc->sc_ier);

		sc->sc_msr = inb(pio(iobase, com_msr));
		if (ISSET(sc->sc_swflags, COM_SW_SOFTCAR) ||
		    ISSET(sc->sc_msr, MSR_DCD) || ISSET(tp->t_cflag, MDMBUF))
			SET(tp->t_state, TS_CARR_ON);
		else
			CLR(tp->t_state, TS_CARR_ON);
	}
	splx(s);

	error = ttyopen(tp, COMDIALOUT(dev), ISSET(flag, O_NONBLOCK));

	if (!error)
		error = (*tp->t_linesw->l_open)(dev, tp);

	/* XXX cleanup on error */

	return error;
}
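Only the first open arms the poll timer (the comsopen++ == 0 test), so a single global callout services every open unit. A sketch of the pattern in the same four-argument callout_reset() style, assuming the poll routine re-arms itself while any unit stays open, as compollin (not shown) presumably does:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static int demo_nopen;
static struct callout demo_poll_ch;	/* callout_init() done at attach */

/* Re-arming poll loop shared by all open units. */
static void
demo_pollin(void *arg)
{
	/* ... drain receive buffers for every open unit ... */
	if (demo_nopen > 0)
		callout_reset(&demo_poll_ch, 1, demo_pollin, arg);
}

static void
demo_open(void)
{
	if (demo_nopen++ == 0)
		callout_reset(&demo_poll_ch, 1, demo_pollin, NULL);
}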
Example #28
File: chrome_kb.c Project: Alkzndr/freebsd
/* read char from the keyboard */
static uint32_t
ckb_read_char_locked(keyboard_t *kbd, int wait)
{
	struct ckb_softc *sc;
	int i,j;
	uint16_t key;
	int oldbit;
	int newbit;
	int status;

	sc = kbd->kb_data;

	CKB_CTX_LOCK_ASSERT();

	if (!KBD_IS_ACTIVE(kbd))
		return (NOKEY);

	if (sc->sc_repeating) {
		sc->sc_repeating = 0;
		callout_reset(&sc->sc_repeat_callout, hz / 10,
		    ckb_repeat, sc);
		return (sc->sc_repeat_key);
	};

	if (sc->sc_flags & CKB_FLAG_POLLING) {
		for (;;) {
			GPIO_PIN_GET(sc->gpio_dev, sc->gpio, &status);
			if (status == 0) {
				if (ec_command(EC_CMD_MKBP_STATE, sc->scan,
					sc->cols,
				    sc->scan, sc->cols)) {
					return (NOKEY);
				}
				break;
			}
			if (!wait) {
				return (NOKEY);
			}
			DELAY(1000);
		}
	};

	for (i = 0; i < sc->cols; i++) {
		for (j = 0; j < sc->rows; j++) {
			oldbit = (sc->scan_local[i] & (1 << j));
			newbit = (sc->scan[i] & (1 << j));

			if (oldbit == newbit)
				continue;

			key = keymap_read(sc, i, j);
			if (key == 0) {
				continue;
			};

			if (newbit > 0) {
				/* key pressed */
				sc->scan_local[i] |= (1 << j);

				/* setup repeating */
				sc->sc_repeat_key = key;
				callout_reset(&sc->sc_repeat_callout,
				    hz / 2, ckb_repeat, sc);

			} else {
				/* key released */
				sc->scan_local[i] &= ~(1 << j);

				/* release flag */
				key |= 0x80;

				/* unsetup repeating */
				sc->sc_repeat_key = -1;
				callout_stop(&sc->sc_repeat_callout);
			}

			return (key);
		}
	}

	return (NOKEY);
}
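Typematic repeat here is just two callout_reset() calls: a key press arms the repeat callout at hz / 2 for the initial delay, and each subsequent shot re-arms at hz / 10, roughly ten repeats per second. A sketch with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct demo_kbd {
	struct callout	repeat;
	int		repeat_key;
	int		repeating;
};

static void
demo_repeat(void *arg)
{
	struct demo_kbd *kb = arg;

	kb->repeating = 1;	/* next read returns repeat_key */
	callout_reset(&kb->repeat, hz / 10, demo_repeat, kb);
}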
Example #29
File: test-timer.cpp Project: kstv/libwait
static void module_init(void)
{
	waitcb_init(&_timer, flush_delack, NULL);
	callout_reset(&_timer, 200);
	return;
}
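Unlike the kernel examples, this snippet is from the libwait project: its callout_reset(&_timer, 200) takes only a handle and a delay because the callback and argument were already bound by waitcb_init(), so it is a different API that merely shares its name with the BSD callout(9) routines.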
Example #30
static void
wdc_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct wdc_regs *wdr = &wdc->regs[chp->ch_channel];
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int wait_flags = (sc_xfer->xs_control & XS_CTL_POLL) ? AT_POLL : 0;
	const char *errstring;

	ATADEBUG_PRINT(("wdc_atapi_start %s:%d:%d, scsi flags 0x%x \n",
	    device_xname(atac->atac_dev), chp->ch_channel, drvp->drive,
	    sc_xfer->xs_control), DEBUG_XFERS);
#if NATA_DMA
	if ((xfer->c_flags & C_DMA) && (drvp->n_xfers <= NXFER))
		drvp->n_xfers++;
#endif
	/* Do control operations specially. */
	if (__predict_false(drvp->state < READY)) {
		/* If it's not a polled command, we need the kernel thread */
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0 &&
		    (chp->ch_flags & ATACH_TH_RUN) == 0) {
			chp->ch_queue->queue_freeze++;
			wakeup(&chp->ch_thread);
			return;
		}
		/*
		 * disable interrupts, all commands here should be quick
		 * enough to be able to poll, and we don't go here that often
		 */
		bus_space_write_1(wdr->ctl_iot, wdr->ctl_ioh, wd_aux_ctlr,
		     WDCTL_4BIT | WDCTL_IDS);
		if (wdc->select)
			wdc->select(chp, xfer->c_drive);
		bus_space_write_1(wdr->cmd_iot, wdr->cmd_iohs[wd_sdh], 0,
		    WDSD_IBM | (xfer->c_drive << 4));
		/* Don't try to set mode if controller can't be adjusted */
		if (atac->atac_set_modes == NULL)
			goto ready;
		/* Also don't try if the drive didn't report its mode */
		if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
			goto ready;
		errstring = "unbusy";
		if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags))
			goto timeout;
		wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
		    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
		errstring = "piomode";
		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
			goto timeout;
		if (chp->ch_status & WDCS_ERR) {
			if (chp->ch_error == WDCE_ABRT) {
				/*
				 * Some ATAPI drives reject PIO settings.
				 * Fall back to PIO mode 3 since that's the
				 * minimum for ATAPI.
				 */
				printf("%s:%d:%d: PIO mode %d rejected, "
				    "falling back to PIO mode 3\n",
				    device_xname(atac->atac_dev),
				    chp->ch_channel, xfer->c_drive,
				    drvp->PIO_mode);
				if (drvp->PIO_mode > 3)
					drvp->PIO_mode = 3;
			} else
				goto error;
		}
#if NATA_DMA
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
			    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
		} else
#endif
		if (drvp->drive_flags & ATA_DRIVE_DMA) {
			wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
			    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
		} else {
			goto ready;
		}
		errstring = "dmamode";
		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
			goto timeout;
		if (chp->ch_status & WDCS_ERR) {
			if (chp->ch_error == WDCE_ABRT) {
#if NATA_UDMA
				if (drvp->drive_flags & ATA_DRIVE_UDMA)
					goto error;
				else
#endif
				{
					/*
					 * The drive rejected our DMA setting.
					 * Fall back to DMA mode 0.
					 */
					printf("%s:%d:%d: DMA mode %d rejected, "
					    "falling back to DMA mode 0\n",
					    device_xname(atac->atac_dev),
					    chp->ch_channel, xfer->c_drive,
					    drvp->DMA_mode);
					if (drvp->DMA_mode > 0)
						drvp->DMA_mode = 0;
				}
			} else
				goto error;
		}
#endif	/* NATA_DMA */
ready:
		drvp->state = READY;
		bus_space_write_1(wdr->ctl_iot, wdr->ctl_ioh, wd_aux_ctlr,
		    WDCTL_4BIT);
		delay(10); /* some drives need a little delay here */
	}
	/* start timeout machinery */
	if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
		callout_reset(&chp->ch_callout, mstohz(sc_xfer->timeout),
		    wdctimeout, chp);

	if (wdc->select)
		wdc->select(chp, xfer->c_drive);
	bus_space_write_1(wdr->cmd_iot, wdr->cmd_iohs[wd_sdh], 0,
	    WDSD_IBM | (xfer->c_drive << 4));
	switch (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags)) {
	case WDCWAIT_OK:
		break;
	case WDCWAIT_TOUT:
		printf("wdc_atapi_start: not ready, st = %02x\n",
		    chp->ch_status);
		sc_xfer->error = XS_TIMEOUT;
		wdc_atapi_reset(chp, xfer);
		return;
	case WDCWAIT_THR:
		return;
	}

	/*
	 * Even with WDCS_ERR, the device should accept a command packet
	 * Limit length to what can be stuffed into the cylinder register
	 * (16 bits).  Some CD-ROMs seem to interpret '0' as 65536,
	 * but not all devices do that and it's not obvious from the
	 * ATAPI spec that that behaviour should be expected.  If more
	 * data is necessary, multiple data transfer phases will be done.
	 */

	wdccommand(chp, xfer->c_drive, ATAPI_PKT_CMD,
	    xfer->c_bcount <= 0xffff ? xfer->c_bcount : 0xffff,
	    0, 0, 0,
#if NATA_DMA
	    (xfer->c_flags & C_DMA) ? ATAPI_PKT_CMD_FTRE_DMA :
#endif
	    0
	    );

#if NATA_PIOBM
	if (xfer->c_flags & C_PIOBM) {
		int error;
		int dma_flags = (sc_xfer->xs_control & XS_CTL_DATA_IN)
		    ?  WDC_DMA_READ : 0;
		if (xfer->c_flags & C_POLL) {
			/* XXX not supported yet --- fall back to PIO */
			xfer->c_flags &= ~C_PIOBM;
		} else {
			/* Init the DMA channel. */
			error = (*wdc->dma_init)(wdc->dma_arg,
			    chp->ch_channel, xfer->c_drive,
			    (char *)xfer->c_databuf,
			    xfer->c_bcount,
			    dma_flags | WDC_DMA_PIOBM_ATAPI);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason.  Fall back to
					 * PIO.
					 */
					xfer->c_flags &= ~C_PIOBM;
					error = 0;
				} else {
					sc_xfer->error = XS_DRIVER_STUFFUP;
					errstring = "piobm";
					goto error;
				}
			}
		}
	}
#endif
	/*
	 * If there is no interrupt for CMD input, busy-wait for it (done in
	 * the interrupt routine).  If it is a polled command, call the
	 * interrupt routine until the command is done.
	 */
	if ((sc_xfer->xs_periph->periph_cap & ATAPI_CFG_DRQ_MASK) !=
	    ATAPI_CFG_IRQ_DRQ || (sc_xfer->xs_control & XS_CTL_POLL)) {
		/* Wait for at least 400ns for status bit to be valid */
		DELAY(1);
		wdc_atapi_intr(chp, xfer, 0);
	} else {
		chp->ch_flags |= ATACH_IRQ_WAIT;
	}
	if (sc_xfer->xs_control & XS_CTL_POLL) {
#if NATA_DMA
		if (chp->ch_flags & ATACH_DMA_WAIT) {
			wdc_dmawait(chp, xfer, sc_xfer->timeout);
			chp->ch_flags &= ~ATACH_DMA_WAIT;
		}
#endif
		while ((sc_xfer->xs_status & XS_STS_DONE) == 0) {
			/* Wait for at least 400ns for status bit to be valid */
			DELAY(1);
			wdc_atapi_intr(chp, xfer, 0);
		}
	}
	return;
timeout:
	printf("%s:%d:%d: %s timed out\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    errstring);
	sc_xfer->error = XS_TIMEOUT;
	bus_space_write_1(wdr->ctl_iot, wdr->ctl_ioh, wd_aux_ctlr, WDCTL_4BIT);
	delay(10); /* some drives need a little delay here */
	wdc_atapi_reset(chp, xfer);
	return;
error:
	printf("%s:%d:%d: %s ",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    errstring);
	printf("error (0x%x)\n", chp->ch_error);
	sc_xfer->error = XS_SHORTSENSE;
	sc_xfer->sense.atapi_sense = chp->ch_error;
	bus_space_write_1(wdr->ctl_iot, wdr->ctl_ioh, wd_aux_ctlr, WDCTL_4BIT);
	delay(10); /* some drives need a little delay here */
	wdc_atapi_reset(chp, xfer);
	return;
}
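One NetBSD-specific note on the timeout arming above: scsipi keeps sc_xfer->timeout in milliseconds, so it is converted with mstohz() before the channel callout is armed. A minimal sketch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

/* Arm a channel watchdog from a millisecond budget (NetBSD mstohz()). */
static void
demo_arm_watchdog(struct callout *c, int timeout_ms,
    void (*fn)(void *), void *arg)
{
	callout_reset(c, mstohz(timeout_ms), fn, arg);
}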