/**
 * @brief   Kick off the transmit sequence for the frame already in the FIFO.
 *
 * If auto-ACK handling is enabled and the last queued frame requested an
 * ACK (FCF ACK-request bit cached in _send_last_fcf by _send()), the
 * transceiver is put into the combined TX-then-RX sequence so it waits for
 * the ACK; otherwise a plain transmit sequence is started.
 */
static void kw2xrf_tx_exec(kw2xrf_t *dev)
{
    if (!(dev->netdev.flags & KW2XRF_OPT_AUTOACK) ||
        !(_send_last_fcf & IEEE802154_FCF_ACK_REQ)) {
        /* no ACK expected: fire-and-forget transmit */
        kw2xrf_set_sequence(dev, XCVSEQ_TRANSMIT);
    }
    else {
        /* frame requests an ACK: transmit, then listen for the ACK */
        kw2xrf_set_sequence(dev, XCVSEQ_TX_RX);
    }
}
/**
 * @brief   Apply a netopt state change to the transceiver.
 *
 * @param dev    device descriptor
 * @param state  requested state (sleep / idle / tx / reset / off)
 *
 * @return  sizeof(netopt_state_t) on success
 * @return  -ENOTSUP for states this driver does not implement
 */
static int _set_state(kw2xrf_t *dev, netopt_state_t state)
{
    if (state == NETOPT_STATE_SLEEP) {
        kw2xrf_set_power_mode(dev, KW2XRF_DOZE);
    }
    else if (state == NETOPT_STATE_IDLE) {
        kw2xrf_set_power_mode(dev, KW2XRF_AUTODOZE);
        kw2xrf_set_sequence(dev, dev->idle_state);
    }
    else if (state == NETOPT_STATE_TX) {
        /* with preloading enabled, TX is triggered explicitly here;
         * otherwise _send() starts the sequence itself */
        if (dev->netdev.flags & KW2XRF_OPT_PRELOADING) {
            kw2xrf_tx_exec(dev);
        }
    }
    else if (state == NETOPT_STATE_RESET) {
        kw2xrf_reset_phy(dev);
    }
    else if (state == NETOPT_STATE_OFF) {
        /* TODO: Replace with powerdown (set reset input low) */
        kw2xrf_set_power_mode(dev, KW2XRF_HIBERNATE);
    }
    else {
        return -ENOTSUP;
    }
    return sizeof(netopt_state_t);
}
/**
 * @brief   (Re)initialize the PHY to the driver's default configuration.
 *
 * Resets the generic netdev_ieee802154 state, then programs TX power,
 * channel, PAN ID and addresses, enables auto-ACK / ACK-request / auto-CCA,
 * selects the autodoze power mode and the configured idle sequence, and
 * finally re-enables the IRQ_B interrupt line.
 *
 * NOTE(review): the call order follows the hardware bring-up sequence of the
 * MKW2xD — do not reorder without checking the reference manual.
 */
void kw2xrf_reset_phy(kw2xrf_t *dev)
{
    /* reset the generic 802.15.4 netdev bookkeeping first */
    netdev_ieee802154_reset(&dev->netdev);

    dev->tx_power = KW2XRF_DEFAULT_TX_POWER;
    kw2xrf_set_tx_power(dev, dev->tx_power);

    kw2xrf_set_channel(dev, KW2XRF_DEFAULT_CHANNEL);
    kw2xrf_set_pan(dev, KW2XRF_DEFAULT_PANID);
    kw2xrf_set_address(dev);

    /* CCA mode 1 (energy detect); raise RX interrupt after 1 byte */
    kw2xrf_set_cca_mode(dev, 1);
    kw2xrf_set_rx_watermark(dev, 1);

    kw2xrf_set_option(dev, KW2XRF_OPT_AUTOACK, true);
    kw2xrf_set_option(dev, KW2XRF_OPT_ACK_REQ, true);
    kw2xrf_set_option(dev, KW2XRF_OPT_AUTOCCA, true);

    kw2xrf_set_power_mode(dev, KW2XRF_AUTODOZE);
    kw2xrf_set_sequence(dev, dev->idle_state);

    /* unmask sequence interrupts and re-enable the IRQ_B pin */
    kw2xrf_clear_dreg_bit(dev, MKW2XDM_PHY_CTRL2, MKW2XDM_PHY_CTRL2_SEQMSK);
    kw2xrf_enable_irq_b(dev);

    DEBUG("[kw2xrf] init phy and (re)set to channel %d and pan %d.\n",
          KW2XRF_DEFAULT_CHANNEL, KW2XRF_DEFAULT_PANID);
}
/**
 * @brief   Handle interrupts raised while in the TR (TX-then-wait-for-ACK)
 *          sequence.
 *
 * Collects all IRQ status bits that were handled into @p irqsts1, then
 * acknowledges exactly those bits in one register write at the end
 * (IRQSTS1 bits are acknowledged by writing them back) and clears them
 * from the caller's cached register copy @p dregs.
 *
 * @param netdev  netdev descriptor (the kw2xrf_t starts with it)
 * @param dregs   snapshot of the device's direct registers read by the ISR
 */
static void _isr_event_seq_tr(netdev_t *netdev, uint8_t *dregs)
{
    kw2xrf_t *dev = (kw2xrf_t *)netdev;
    uint8_t irqsts1 = 0;

    /* TX part of the sequence finished */
    if (dregs[MKW2XDM_IRQSTS1] & MKW2XDM_IRQSTS1_TXIRQ) {
        DEBUG("[kw2xrf] finished TXSEQ\n");
        irqsts1 |= MKW2XDM_IRQSTS1_TXIRQ;

        /* if an ACK is required, arm the sequence timeout to bound the wait */
        if (dregs[MKW2XDM_PHY_CTRL1] & MKW2XDM_PHY_CTRL1_RXACKRQD) {
            DEBUG("[kw2xrf] wait for RX ACK\n");
            kw2xrf_seq_timeout_on(dev, _MACACKWAITDURATION);
        }
    }

    if (dregs[MKW2XDM_IRQSTS1] & MKW2XDM_IRQSTS1_RXWTRMRKIRQ) {
        DEBUG("[kw2xrf] got RXWTRMRKIRQ\n");
        irqsts1 |= MKW2XDM_IRQSTS1_RXWTRMRKIRQ;
    }

    if (dregs[MKW2XDM_IRQSTS1] & MKW2XDM_IRQSTS1_FILTERFAIL_IRQ) {
        DEBUG("[kw2xrf] got FILTERFAILIRQ\n");
        irqsts1 |= MKW2XDM_IRQSTS1_FILTERFAIL_IRQ;
    }

    /* RXIRQ within a TR sequence means the expected ACK frame arrived */
    if (dregs[MKW2XDM_IRQSTS1] & MKW2XDM_IRQSTS1_RXIRQ) {
        DEBUG("[kw2xrf] got RX ACK\n");
        irqsts1 |= MKW2XDM_IRQSTS1_RXIRQ;
    }

    if (dregs[MKW2XDM_IRQSTS1] & MKW2XDM_IRQSTS1_SEQIRQ) {
        /* sequence completed; if CCA also fired, check whether the channel
         * was busy and report it */
        if (dregs[MKW2XDM_IRQSTS1] & MKW2XDM_IRQSTS1_CCAIRQ) {
            irqsts1 |= MKW2XDM_IRQSTS1_CCAIRQ;
            if (dregs[MKW2XDM_IRQSTS2] & MKW2XDM_IRQSTS2_CCA) {
                DEBUG("[kw2xrf] CCA CH busy\n");
                netdev->event_callback(netdev,
                                       NETDEV_EVENT_TX_MEDIUM_BUSY);
            }
        }

        DEBUG("[kw2xrf] SEQIRQ\n");
        irqsts1 |= MKW2XDM_IRQSTS1_SEQIRQ;

        assert(dev->pending_tx != 0);
        dev->pending_tx--;
        netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
        kw2xrf_seq_timeout_off(dev);
        kw2xrf_set_idle_sequence(dev);
    }
    else if (dregs[MKW2XDM_IRQSTS3] & MKW2XDM_IRQSTS3_TMR4IRQ) {
        /* timer 4 fired without SEQIRQ: the ACK never arrived */
        DEBUG("[kw2xrf] TC4TMOUT, no SEQIRQ, TX failed\n");

        assert(dev->pending_tx != 0);
        dev->pending_tx--;
        netdev->event_callback(netdev, NETDEV_EVENT_TX_NOACK);
        kw2xrf_seq_timeout_off(dev);
        kw2xrf_set_sequence(dev, dev->idle_state);
    }

    /* acknowledge all handled bits in one shot and drop them from the
     * cached snapshot so later handlers don't re-process them */
    kw2xrf_write_dreg(dev, MKW2XDM_IRQSTS1, irqsts1);
    dregs[MKW2XDM_IRQSTS1] &= ~irqsts1;
}
/**
 * @brief   Handle interrupts raised during a continuous CCA (CCCA) sequence.
 *
 * The sequence ends either when both CCAIRQ and SEQIRQ are set (channel
 * found idle) or when timer 4 fires (timeout). Either way the sequence
 * timeout is disarmed, the transceiver returns to its idle sequence, and
 * the handled status bits are acknowledged (write-back to IRQSTS1) and
 * removed from the cached snapshot @p dregs.
 */
static void _isr_event_seq_ccca(netdev_t *netdev, uint8_t *dregs)
{
    kw2xrf_t *dev = (kw2xrf_t *)netdev;
    const uint8_t done_mask = MKW2XDM_IRQSTS1_CCAIRQ
                              | MKW2XDM_IRQSTS1_SEQIRQ;
    uint8_t ack_bits = 0;

    if ((dregs[MKW2XDM_IRQSTS1] & done_mask) == done_mask) {
        /* both bits set: the channel became idle before the timeout */
        ack_bits |= done_mask;
        DEBUG("[kw2xrf] CCCA CH idle\n");
        kw2xrf_seq_timeout_off(dev);
        kw2xrf_set_sequence(dev, dev->idle_state);
    }
    else if (dregs[MKW2XDM_IRQSTS3] & MKW2XDM_IRQSTS3_TMR4IRQ) {
        /* timer 4 expired first: give up on this CCA round */
        ack_bits |= done_mask;
        DEBUG("[kw2xrf] CCCA timeout\n");
        kw2xrf_seq_timeout_off(dev);
        kw2xrf_set_sequence(dev, dev->idle_state);
    }

    kw2xrf_write_dreg(dev, MKW2XDM_IRQSTS1, ack_bits);
    dregs[MKW2XDM_IRQSTS1] &= ~ack_bits;
}
/**
 * @brief   Load a frame (given as an iovec scatter list) into the TX FIFO
 *          and, unless preloading is enabled, start transmission.
 *
 * @param netdev  netdev descriptor (the kw2xrf_t starts with it)
 * @param vector  scatter list of frame fragments (without FCS)
 * @param count   number of entries in @p vector
 *
 * @return  number of payload bytes loaded on success
 * @return  0 if the transceiver is busy with a TX/TR sequence (frame dropped)
 * @return  -EOVERFLOW if the frame plus FCS exceeds the maximum frame length
 */
static int _send(netdev_t *netdev, const struct iovec *vector, unsigned count)
{
    kw2xrf_t *dev = (kw2xrf_t *)netdev;
    const struct iovec *ptr = vector;
    uint8_t *pkt_buf = &(dev->buf[1]);   /* buf[0] is the frame length field */
    size_t len = 0;

    /* load packet data into buffer */
    for (unsigned i = 0; i < count; i++, ptr++) {
        /* current packet data + FCS too long */
        if ((len + ptr->iov_len + IEEE802154_FCS_LEN) > KW2XRF_MAX_PKT_LENGTH) {
            /* report the full offending size including the fragment that
             * caused the overflow (the old message omitted ptr->iov_len) */
            LOG_ERROR("[kw2xrf] packet too large (%u byte) to be send\n",
                      (unsigned)(len + ptr->iov_len + IEEE802154_FCS_LEN));
            return -EOVERFLOW;
        }
        len = kw2xrf_tx_load(pkt_buf, ptr->iov_base, ptr->iov_len, len);
    }

    /* make sure ongoing T or TR sequences are finished */
    if (kw2xrf_can_switch_to_idle(dev)) {
        kw2xrf_set_sequence(dev, XCVSEQ_IDLE);
        dev->pending_tx++;
    }
    else {
        /* do not wait, this can lead to a dead lock */
        return 0;
    }

    /*
     * Nbytes = FRAME_LEN - 2 -> FRAME_LEN = Nbytes + 2
     * MKW2xD Reference Manual, P.192
     */
    dev->buf[0] = len + IEEE802154_FCS_LEN;

    /* cache the FCF's first byte: kw2xrf_tx_exec() uses its ACK-request
     * bit to decide between the T and TR sequence */
    _send_last_fcf = dev->buf[1];

    kw2xrf_write_fifo(dev, dev->buf, dev->buf[0]);
#ifdef MODULE_NETSTATS_L2
    netdev->stats.tx_bytes += len;
#endif

    /* send data out directly if pre-loading is disabled */
    if (!(dev->netdev.flags & KW2XRF_OPT_PRELOADING)) {
        kw2xrf_tx_exec(dev);
    }

    return (int)len;
}