Example no. 1
0
static int cc2520_csma_tx(u8 * buf, u8 len)
{
	int initial_backoff;

	/* With CSMA disabled, hand the frame straight to the layer below. */
	if (!csma_enabled)
		return csma_bottom->tx(buf, len);

	spin_lock_irqsave(&state_sl, flags);
	if (csma_state != CC2520_CSMA_IDLE) {
		/* A transmission is already in flight; reject this one. */
		spin_unlock_irqrestore(&state_sl, flags);
		DBG((KERN_INFO "[cc2520] - csma layer busy.\n"));
		csma_top->tx_done(-CC2520_TX_BUSY);
		return 0;
	}

	/* Claim the state machine, then drop the lock before copying. */
	csma_state = CC2520_CSMA_TX;
	spin_unlock_irqrestore(&state_sl, flags);

	memcpy(cur_tx_buf, buf, len);
	cur_tx_len = len;

	initial_backoff = cc2520_csma_get_backoff(backoff_min, backoff_max_init);

	DBG((KERN_INFO "[cc2520] - waiting %d uS to send.\n", initial_backoff));
	cc2520_csma_start_timer(initial_backoff);

	return 0;
}
Example no. 2
0
/*
 * Backoff-timer expiry handler: if the channel is clear, transmit the
 * buffered frame; otherwise either re-arm the timer with the larger
 * congestion backoff (first busy hit, state TX) or abort and report
 * busy upward (state already CONG).
 *
 * NOTE(review): this version calls csma_bottom->tx() directly from
 * hrtimer (atomic) context. A later revision of this same callback in
 * this file defers the send to a workqueue, explaining that the tx
 * path takes spin locks and assumes it can be preempted — that promise
 * is broken here. It also uses plain spin_lock() on state_sl where the
 * tx-side code uses spin_lock_irqsave(); confirm which contexts take
 * state_sl before relying on this variant.
 */
static enum hrtimer_restart cc2520_csma_timer_cb(struct hrtimer *timer)
{
	ktime_t kt;
	int new_backoff;

	//printk(KERN_INFO "[cc2520] - csma timer fired. \n");
	if (cc2520_radio_is_clear()) {
		//printk(KERN_INFO "[cc2520] - channel clear, sending.\n");
		csma_bottom->tx(cur_tx_buf, cur_tx_len);
		return HRTIMER_NORESTART;		
	}
	else {
		spin_lock(&state_sl);
		if (csma_state == CC2520_CSMA_TX) {
			/* First busy hit: widen the window and retry. */
			csma_state = CC2520_CSMA_CONG;
			spin_unlock(&state_sl);

			new_backoff = 
				cc2520_csma_get_backoff(backoff_min, backoff_max_cong);

			INFO((KERN_INFO "[cc2520] - channel still busy, waiting %d uS\n", new_backoff));
			kt=ktime_set(0,1000 * new_backoff);
			hrtimer_forward_now(&backoff_timer, kt);
			return HRTIMER_RESTART;
		}
		else {
			/* Already backed off once: give up on this frame. */
			csma_state = CC2520_CSMA_IDLE;
			spin_unlock(&state_sl);

			INFO((KERN_INFO "[cc2520] - csma/ca: channel busy. aborting tx\n"));
			csma_top->tx_done(-CC2520_TX_BUSY);
			return HRTIMER_NORESTART;
		}
	}
}
Example no. 3
0
static enum hrtimer_restart cc2520_csma_timer_cb(struct hrtimer *timer)
{
	ktime_t interval;
	int cong_backoff;

	if (cc2520_radio_is_clear()) {
		/*
		 * The channel is free, but we must not transmit from
		 * interrupt context: paths below us take spin locks and
		 * assume they can be preempted, a promise that is broken
		 * while atomic. Defer the send to a work queue instead
		 * (adds roughly 30uS of latency).
		 */
		INIT_WORK(&work, cc2520_csma_wq);
		queue_work(wq, &work);
		return HRTIMER_NORESTART;
	}

	spin_lock_irqsave(&state_sl, flags);
	if (csma_state != CC2520_CSMA_TX) {
		/* Already backed off once: abort and report busy upward. */
		csma_state = CC2520_CSMA_IDLE;
		spin_unlock_irqrestore(&state_sl, flags);

		csma_top->tx_done(-CC2520_TX_BUSY);
		return HRTIMER_NORESTART;
	}

	/* First busy hit: widen the backoff window and rearm the timer. */
	csma_state = CC2520_CSMA_CONG;
	spin_unlock_irqrestore(&state_sl, flags);

	cong_backoff = cc2520_csma_get_backoff(backoff_min, backoff_max_cong);

	INFO((KERN_INFO "[cc2520] - channel still busy, waiting %d uS\n", cong_backoff));
	interval = ktime_set(0,1000 * cong_backoff);
	hrtimer_forward_now(&backoff_timer, interval);
	return HRTIMER_RESTART;
}
Example no. 4
0
static int cc2520_csma_tx(u8 * buf, u8 len)
{
	int delay_us;
	int busy;

	/* Atomically claim the CSMA state machine for this frame. */
	spin_lock(&state_sl);
	busy = (csma_state != CC2520_CSMA_IDLE);
	if (!busy)
		csma_state = CC2520_CSMA_TX;
	spin_unlock(&state_sl);

	if (busy) {
		/* Another frame is mid-backoff; refuse this one. */
		csma_top->tx_done(-CC2520_TX_BUSY);
		return 0;
	}

	/* Buffer the frame, then arm the initial backoff timer. */
	memcpy(cur_tx_buf, buf, len);
	cur_tx_len = len;

	delay_us = cc2520_csma_get_backoff(backoff_min, backoff_max_init);
	cc2520_csma_start_timer(delay_us);

	return 0;
}