Example #1
static void psock_insert_segment(FAR struct tcp_wrbuffer_s *wrb,
                                 FAR sq_queue_t *q)
{
    sq_entry_t *entry = (sq_entry_t*)wrb;
    sq_entry_t *insert = NULL;

    sq_entry_t *itr;
    for (itr = sq_peek(q); itr; itr = sq_next(itr))
    {
        FAR struct tcp_wrbuffer_s *wrb0 = (FAR struct tcp_wrbuffer_s*)itr;
        if (WRB_SEQNO(wrb0) < WRB_SEQNO(wrb))
        {
            insert = itr;
        }
        else
        {
            break;
        }
    }

    if (insert)
    {
        sq_addafter(insert, entry, q);
    }
    else
    {
        sq_addfirst(entry, q);
    }
}
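
Examples #1 and (later) #6 implement the same idea: keep a singly-linked queue ordered by sequence number by walking it with sq_peek()/sq_next(), remembering the last entry whose key is still smaller than the new one, and then linking the new entry in with sq_addafter() or, if it belongs at the front, sq_addfirst(). Below is a minimal sketch of that pattern, not taken from either example; it assumes a NuttX build where the sq_* API is available (the header is <queue.h> in older releases, <nuttx/queue.h> in newer ones), and struct my_item_s / queue_insert_sorted() are hypothetical names.

/* Hedged sketch of the sorted-insert pattern from Examples #1 and #6.
 * Assumes a NuttX environment providing the sq_* singly-linked queue API;
 * struct my_item_s and queue_insert_sorted() are hypothetical names.
 */

#include <nuttx/config.h>
#include <stdint.h>
#include <queue.h>

struct my_item_s
{
  sq_entry_t node;    /* Queue entry; placed first so the casts below work */
  uint32_t   seqno;   /* Sort key */
};

static void queue_insert_sorted(FAR struct my_item_s *item,
                                FAR sq_queue_t *q)
{
  FAR sq_entry_t *insert = NULL;
  FAR sq_entry_t *itr;

  /* Remember the last entry whose key is smaller than the new one */

  for (itr = sq_peek(q); itr != NULL; itr = sq_next(itr))
    {
      if (((FAR struct my_item_s *)itr)->seqno < item->seqno)
        {
          insert = itr;
        }
      else
        {
          break;
        }
    }

  /* Link the new entry after that point, or at the head of the queue */

  if (insert != NULL)
    {
      sq_addafter(insert, &item->node, q);
    }
  else
    {
      sq_addfirst(&item->node, q);
    }
}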
Example #2
void ieee80211_crypto_detach(struct ieee80211_s *ic)
{
  FAR struct ieee80211_pmk *pmk;
  int i;

  /* Purge the PMKSA cache */

  while ((pmk = (FAR struct ieee80211_pmk *)sq_peek(&ic->ic_pmksa)) != NULL)
    {
      sq_remfirst(&ic->ic_pmksa);
      memset(pmk, 0, sizeof(struct ieee80211_pmk));
      kfree(pmk);
    }

  /* Clear all group keys from memory */

  for (i = 0; i < IEEE80211_GROUP_NKID; i++)
    {
      struct ieee80211_key *k = &ic->ic_nw_keys[i];
      if (k->k_cipher != IEEE80211_CIPHER_NONE)
        {
          (*ic->ic_delete_key) (ic, NULL, k);
        }

      memset(k, 0, sizeof(*k));
    }

  /* Clear pre-shared key from memory */

  memset(ic->ic_psk, 0, IEEE80211_PMK_LEN);
}
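
Example #2 drains a queue by repeatedly looking at the head with sq_peek() and unlinking it with sq_remfirst() until the queue is empty, scrubbing each node with memset() before freeing it because the nodes hold key material. A minimal sketch of that drain loop follows, under the same hedged NuttX assumptions as above; struct secret_s and queue_drain() are made-up names.

/* Hedged sketch of the drain-and-scrub pattern from Example #2.
 * struct secret_s and queue_drain() are hypothetical names.
 */

#include <nuttx/config.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <queue.h>

struct secret_s
{
  sq_entry_t node;    /* Queue entry; first member */
  uint8_t    key[32]; /* Sensitive material that must be wiped */
};

static void queue_drain(FAR sq_queue_t *q)
{
  FAR struct secret_s *item;

  while ((item = (FAR struct secret_s *)sq_peek(q)) != NULL)
    {
      /* Unlink the head entry, scrub its contents, then release it */

      sq_remfirst(q);
      memset(item, 0, sizeof(*item));
      free(item);
    }
}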
Example #3
/*
 * Reschedule the next timer interrupt.
 *
 * This routine must be called with interrupts disabled.
 */
static void
hrt_call_reschedule()
{
	hrt_abstime	now = hrt_absolute_time();
	struct hrt_call	*next = (struct hrt_call *)sq_peek(&callout_queue);
	hrt_abstime	deadline = now + HRT_INTERVAL_MAX;

	/*
	 * Determine what the next deadline will be.
	 *
	 * Note that we ensure that this will be within the counter
	 * period, so that when we truncate all but the low 16 bits
	 * the next time the compare matches it will be the deadline
	 * we want.
	 *
	 * It is important for accurate timekeeping that the compare
	 * interrupt fires sufficiently often that the base_time update in 
	 * hrt_absolute_time runs at least once per timer period.
	 */
	if (next != NULL) {
		//lldbg("entry in queue\n");
		if (next->deadline <= (now + HRT_INTERVAL_MIN)) {
			//lldbg("pre-expired\n");
			/* set a minimal deadline so that we call ASAP */
			deadline = now + HRT_INTERVAL_MIN;
		} else if (next->deadline < deadline) {
			//lldbg("due soon\n");
			deadline = next->deadline;
		}
	}
	//lldbg("schedule for %u at %u\n", (unsigned)(deadline & 0xffffffff), (unsigned)(now & 0xffffffff));

	/* set the new compare value */
	rCCR_HRT = deadline & 0xffff;
}
Example #4
perf_counter_t
perf_alloc_once(enum perf_counter_type type, const char *name)
{
	pthread_mutex_lock(&perf_counters_mutex);
	perf_counter_t handle = (perf_counter_t)sq_peek(&perf_counters);

	while (handle != NULL) {
		if (!strcmp(handle->name, name)) {
			if (type == handle->type) {
				/* they are the same counter */
				pthread_mutex_unlock(&perf_counters_mutex);
				return handle;

			} else {
				/* same name but different type, assuming this is an error and not intended */
				pthread_mutex_unlock(&perf_counters_mutex);
				return NULL;
			}
		}

		handle = (perf_counter_t)sq_next(&handle->link);
	}

	pthread_mutex_unlock(&perf_counters_mutex);

	/* if the execution reaches here, no existing counter of that name was found */
	return perf_alloc(type, name);
}
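
perf_alloc_once() above walks the global perf_counters queue under perf_counters_mutex, returns the existing handle when a counter with the same name and type is already registered, returns NULL on a name collision with a different type, and otherwise falls back to perf_alloc(). A hedged usage sketch follows; the include path and the counter name "my_module_events" are assumptions and may differ across PX4 versions.

/* Hedged usage sketch for perf_alloc_once(); the include path and the
 * counter name are assumptions, not taken from the example above.
 */

#include <perf/perf_counter.h>

static perf_counter_t g_events_perf;

void my_module_start(void)
{
	/* Reuse an existing "my_module_events" counter of type PC_COUNT if one
	 * is already registered, otherwise allocate a new one.
	 */
	g_events_perf = perf_alloc_once(PC_COUNT, "my_module_events");
}

void my_module_on_event(void)
{
	/* perf_alloc_once() returns NULL on a name/type mismatch, so guard */
	if (g_events_perf != NULL) {
		perf_count(g_events_perf);
	}
}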
Example #5
static void
hrt_call_enter(struct hrt_call *entry)
{
	struct hrt_call	*call, *next;

	call = (struct hrt_call *)sq_peek(&callout_queue);

	if ((call == NULL) || (entry->deadline < call->deadline)) {
		sq_addfirst(&entry->link, &callout_queue);
		//lldbg("call enter at head, reschedule\n");
		/* we changed the next deadline, reschedule the timer event */
		hrt_call_reschedule();

	} else {
		do {
			next = (struct hrt_call *)sq_next(&call->link);

			if ((next == NULL) || (entry->deadline < next->deadline)) {
				//lldbg("call enter after head\n");
				sq_addafter(&call->link, &entry->link, &callout_queue);
				break;
			}
		} while ((call = next) != NULL);
	}

	//lldbg("scheduled\n");
}
Example #6
static void send_insert_seqment(FAR struct uip_wrbuffer_s *segment,
                                FAR sq_queue_t *q)
{
  sq_entry_t *entry = (sq_entry_t*)segment;
  sq_entry_t *insert = NULL;

  sq_entry_t *itr;
  for (itr = sq_peek(q); itr; itr = sq_next(itr))
    {
      FAR struct uip_wrbuffer_s *segment0 = (FAR struct uip_wrbuffer_s*)itr;
      if (segment0->wb_seqno < segment->wb_seqno)
        {
          insert = itr;
        }
      else
        {
          break;
        }
    }

  if (insert)
    {
      sq_addafter(insert, entry, q);
    }
  else
    {
      sq_addfirst(entry, q);
    }
}
Example #7
static void
hrt_call_invoke(void)
{
	struct hrt_call	*call;
	hrt_abstime deadline;

	hrt_lock();

	while (true) {
		/* get the current time */
		hrt_abstime now = hrt_absolute_time();

		call = (struct hrt_call *)sq_peek(&callout_queue);

		if (call == NULL) {
			break;
		}

		if (call->deadline > now) {
			break;
		}

		sq_rem(&call->link, &callout_queue);
		//PX4_INFO("call pop");

		/* save the intended deadline for periodic calls */
		deadline = call->deadline;

		/* zero the deadline, as the call has occurred */
		call->deadline = 0;

		/* invoke the callout (if there is one) */
		if (call->callout) {
			// Unlock so we don't deadlock in callback
			hrt_unlock();

			//PX4_INFO("call %p: %p(%p)", call, call->callout, call->arg);
			call->callout(call->arg);

			hrt_lock();
		}

		/* if the callout has a non-zero period, it has to be re-entered */
		if (call->period != 0) {
			// re-check call->deadline to allow for
			// callouts to re-schedule themselves
			// using hrt_call_delay()
			if (call->deadline <= now) {
				call->deadline = deadline + call->period;
				//PX4_INFO("call deadline set to %lu now=%lu", call->deadline,  now);
			}

			hrt_call_enter(call);
		}
	}

	hrt_unlock();
}
Example #8
int uip_backlogdelete(FAR struct uip_conn *conn, FAR struct uip_conn *blconn)
{
  FAR struct uip_backlog_s     *bls;
  FAR struct uip_blcontainer_s *blc;
  FAR struct uip_blcontainer_s *prev;

  nllvdbg("conn=%p blconn=%p\n", conn, blconn);

#ifdef CONFIG_DEBUG
  if (!conn)
    {
      return -EINVAL;
    }
#endif

  bls = conn->backlog;
  if (bls)
    {
       /* Find the container holding the connection */

       for (blc = (FAR struct uip_blcontainer_s *)sq_peek(&bls->bl_pending), prev = NULL;
            blc;
            prev = blc, blc = (FAR struct uip_blcontainer_s *)sq_next(&blc->bc_node))
         {
            if (blc->bc_conn == blconn)
              {
                if (prev)
                  {
                    /* Remove the container from the middle of the list of
                     * pending connections
                     */

                    (void)sq_remafter(&prev->bc_node, &bls->bl_pending);
                  }
                else
                  {
                    /* Remove the container from the head of the list of
                     * pending connections
                     */

                    (void)sq_remfirst(&bls->bl_pending);
                  }

                /* Put container in the free list */

                blc->bc_conn = NULL;
                sq_addlast(&blc->bc_node, &bls->bl_free);
                return OK;
              }
          }

        nlldbg("Failed to find pending connection\n");
        return -EINVAL;
    }
  return OK;
}
Example #9
void
perf_print_all(int fd)
{
	perf_counter_t handle = (perf_counter_t)sq_peek(&perf_counters);

	while (handle != NULL) {
		perf_print_counter_fd(fd, handle);
		handle = (perf_counter_t)sq_next(&handle->link);
	}
}
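
Examples #9 and #11-#13 all iterate the same way: take the head with sq_peek(), then advance by calling sq_next() on the embedded link member and casting the result back to the counter type; this cast pattern relies on the link entry being the first member of the structure. A minimal sketch of that read-only traversal, under the same hedged NuttX assumptions; struct my_counter_s and counters_total() are hypothetical names.

/* Hedged sketch of the read-only traversal pattern used by the perf_*
 * examples.  struct my_counter_s and counters_total() are hypothetical;
 * the cast back from sq_next() relies on the link entry being the first
 * member of the structure.
 */

#include <nuttx/config.h>
#include <stdint.h>
#include <queue.h>

struct my_counter_s
{
  sq_entry_t link;    /* Must be the first member for the casts below */
  const char *name;
  uint64_t   count;
};

static uint64_t counters_total(FAR sq_queue_t *counters)
{
  FAR struct my_counter_s *item =
    (FAR struct my_counter_s *)sq_peek(counters);
  uint64_t total = 0;

  while (item != NULL)
    {
      total += item->count;
      item = (FAR struct my_counter_s *)sq_next(&item->link);
    }

  return total;
}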
Example #10
void ubmodem_uninitialize(struct ubmodem_s *modem)
{
  int ret;

  DEBUGASSERT(sq_peek(&modem->event_listeners) == NULL);
#ifdef CONFIG_UBMODEM_USRSOCK
  DEBUGASSERT(sq_peek(&modem->sockets.list) == NULL);
  DEBUGASSERT(sq_peek(&modem->sockets.removed_list) == NULL);
#endif
  DEBUGASSERT(sq_peek(&modem->timers) == NULL);
  DEBUGASSERT(sq_peek(&modem->tasks) == NULL);

  ret = ubmodem_hw_deinitialize(modem, modem->serial_fd);
  if (ret != OK)
    {
      dbg("Failed to deinitialize modem HW.\n");
    }
  free(modem);
}
Example #11
void
perf_reset_all(void)
{
	perf_counter_t handle = (perf_counter_t)sq_peek(&perf_counters);

	while (handle != NULL) {
		perf_reset(handle);
		handle = (perf_counter_t)sq_next(&handle->link);
	}
	for (int i = 0; i <= latency_bucket_count; i++) {
		latency_counters[i] = 0;
	}
}
Example #12
void
perf_print_all(int fd)
{
	pthread_mutex_lock(&perf_counters_mutex);
	perf_counter_t handle = (perf_counter_t)sq_peek(&perf_counters);

	while (handle != NULL) {
		perf_print_counter_fd(fd, handle);
		handle = (perf_counter_t)sq_next(&handle->link);
	}

	pthread_mutex_unlock(&perf_counters_mutex);
}
Example #13
void
perf_iterate_all(perf_callback cb, void *user)
{
	pthread_mutex_lock(&perf_counters_mutex);
	perf_counter_t handle = (perf_counter_t)sq_peek(&perf_counters);

	while (handle != NULL) {
		cb(handle, user);
		handle = (perf_counter_t)sq_next(&handle->link);
	}

	pthread_mutex_unlock(&perf_counters_mutex);
}
Example #14
File: drv_hrt.c Project: JRiefers/Firmware
static void
hrt_call_invoke(void)
{
	struct hrt_call	*call;
	hrt_abstime deadline;

	while (true) {
		/* get the current time */
		hrt_abstime now = hrt_absolute_time();

		call = (struct hrt_call *)sq_peek(&callout_queue);

		if (call == NULL)
			break;

		if (call->deadline > now)
			break;

		sq_rem(&call->link, &callout_queue);
		//lldbg("call pop\n");

		/* save the intended deadline for periodic calls */
		deadline = call->deadline;

		/* zero the deadline, as the call has occurred */
		call->deadline = 0;

		/* invoke the callout (if there is one) */
		if (call->callout) {
			//lldbg("call %p: %p(%p)\n", call, call->callout, call->arg);
			call->callout(call->arg);
		}

		/* if the callout has a non-zero period, it has to be re-entered */
		if (call->period != 0) {
			// re-check call->deadline to allow for
			// callouts to re-schedule themselves
			// using hrt_call_delay()
			if (call->deadline <= now) {
				call->deadline = deadline + call->period;
			}

			hrt_call_enter(call);
		}
	}
}
Example #15
static void handle_queued_events(struct conman_client_s *client,
                                 sq_queue_t *qevents)
{
  struct queued_event_s *item;

  item = (void *)sq_peek(qevents);
  while (item)
    {
      struct queued_event_s *curr = item;

      item = (void *)sq_next(&item->node);

      client->event_callback(client, curr->type, curr->payload,
                             curr->payloadlen, client->event_priv);
      free(curr->payload);
      free(curr);
    }
}
Example #16
File: pm_changestate.c Project: uiv/Lerdu
static inline void pm_changeall(enum pm_state_e newstate)
{
  FAR sq_entry_t *entry;

  /* Visit each registered callback structure. */

  for (entry = sq_peek(&g_pmglobals.registry); entry; entry = sq_next(entry))
    {
      /* Is the notification callback supported? */

      FAR struct pm_callback_s *cb = (FAR struct pm_callback_s *)entry;
      if (cb->notify)
        {
          /* Yes.. notify the driver */

          cb->notify(cb, newstate);
        }
    }
}
Example #17
File: drv_hrt.c Project: js515/Firmware
/**
 * Reschedule the next timer interrupt.
 *
 * This routine must be called with interrupts disabled.
 */
static void
hrt_call_reschedule()
{
	hrt_abstime	now = hrt_absolute_time();
	hrt_abstime	delay = HRT_INTERVAL_MAX;
	struct hrt_call	*next = (struct hrt_call *)sq_peek(&callout_queue);
	hrt_abstime	deadline = now + HRT_INTERVAL_MAX;

	//PX4_INFO("hrt_call_reschedule");
	
	/*
	 * Determine what the next deadline will be.
	 *
	 * Note that we ensure that this will be within the counter
	 * period, so that when we truncate all but the low 16 bits
	 * the next time the compare matches it will be the deadline
	 * we want.
	 *
	 * It is important for accurate timekeeping that the compare
	 * interrupt fires sufficiently often that the base_time update in
	 * hrt_absolute_time runs at least once per timer period.
	 */
	if (next != NULL) {
		//lldbg("entry in queue\n");
		if (next->deadline <= (now + HRT_INTERVAL_MIN)) {
			//lldbg("pre-expired\n");
			/* set a minimal deadline so that we call ASAP */
			delay = HRT_INTERVAL_MIN;

		} else if (next->deadline < deadline) {
			//lldbg("due soon\n");
			delay = next->deadline - now;
		}
	}

	// There is no timer ISR, so simulate one by putting an event on the 
	// high priority work queue

	// Remove the existing expiry and update with the new expiry
	hrt_work_cancel(&_hrt_work);

	hrt_work_queue(&_hrt_work, (worker_t)&hrt_tim_isr, NULL, delay);
}
Example #18
/****************************************************************************
 * Name: ubgps_publish_event
 *
 * Description:
 *   Publish event to registered callbacks
 *
 * Input Parameters:
 *   gps         - GPS object
 *   event       - Pointer to published event
 *
 * Returned Values:
 *   Status
 *
 ****************************************************************************/
void ubgps_publish_event(struct ubgps_s * const gps, struct gps_event_s const * const event)
{
  struct gps_callback_entry_s const * cb;

  DEBUGASSERT(gps && event);

  /* Check if any callback is interested of this event */

  if (!(gps->callback_event_mask & event->id))
    return;

  cb = (struct gps_callback_entry_s *)sq_peek(&gps->callbacks);
  while (cb)
    {
      if ((cb->event_mask & event->id) && cb->callback)
        cb->callback(event, cb->priv);

      cb = (struct gps_callback_entry_s *)sq_next(&cb->entry);
    }
}
Example #19
void __conman_send_boardcast_event(struct conman_s *conman,
                                   enum conman_msgs_ids type,
                                   const void *payload,
                                   size_t payloadlen)
{
  struct conman_sd_entry_s *client;
  struct conman_resp_hdr hdr = {};
  int send_count = 0;
  int ret;

  hdr.head.id = type;
  hdr.head.len = payloadlen;
  hdr.respval = CONMAN_RESP_EVENT;

  for (client = (struct conman_sd_entry_s *)sq_peek(&conman->server.sds);
       client != NULL;
       client = (struct conman_sd_entry_s *)sq_next(&client->entry))
    {
      if (!client->events_enabled)
        {
          continue;
        }

      ret = __conman_util_block_write(client->sd, &hdr, sizeof(hdr));
      if (ret < 0)
        {
          continue;
        }

      ret = __conman_util_block_write(client->sd, payload, hdr.head.len);
      if (ret < 0)
        {
          continue;
        }

      send_count++;
    }

  conman_dbg("send boardcast event (type=%d) to %d clients.\n",
             type, send_count);
}
Example #20
/**
 * Reschedule the next timer interrupt.
 *
 * This routine must be called with interrupts disabled.
 */
static void
hrt_call_reschedule()
{
	hrt_abstime	now = hrt_absolute_time();
	struct hrt_call	*next = (struct hrt_call *)sq_peek(&callout_queue);
	hrt_abstime	deadline = now + HRT_INTERVAL_MAX;
	uint32_t	ticks = USEC2TICK(HRT_INTERVAL_MAX*1000);

	//printf("hrt_call_reschedule\n");
	
	/*
	 * Determine what the next deadline will be.
	 *
	 * Note that we ensure that this will be within the counter
	 * period, so that when we truncate all but the low 16 bits
	 * the next time the compare matches it will be the deadline
	 * we want.
	 *
	 * It is important for accurate timekeeping that the compare
	 * interrupt fires sufficiently often that the base_time update in
	 * hrt_absolute_time runs at least once per timer period.
	 */
	if (next != NULL) {
		//lldbg("entry in queue\n");
		if (next->deadline <= (now + HRT_INTERVAL_MIN)) {
			//lldbg("pre-expired\n");
			/* set a minimal deadline so that we call ASAP */
			ticks = USEC2TICK(HRT_INTERVAL_MIN*1000);

		} else if (next->deadline < deadline) {
			//lldbg("due soon\n");
			ticks = USEC2TICK((next->deadline - now)*1000);
		}
	}

	// There is no timer ISR, so simulate one by putting an event on the 
	// high priority work queue
	//printf("ticks = %u\n", ticks);
	work_queue(HPWORK, &_hrt_work, (worker_t)&hrt_tim_isr, NULL, ticks);
}
Example #21
File: pm_changestate.c Project: uiv/Lerdu
static int pm_prepall(enum pm_state_e newstate)
{
  FAR sq_entry_t *entry;
  int ret = OK;

  /* Visit each registered callback structure. */

  for (entry = sq_peek(&g_pmglobals.registry);
       entry && ret == OK;
       entry = sq_next(entry))
    {
      /* Is the prepare callback supported? */

      FAR struct pm_callback_s *cb = (FAR struct pm_callback_s *)entry;
      if (cb->prepare)
        {
          /* Yes.. prepare the driver */

          ret = cb->prepare(cb, newstate);
        }
    }

  return ret;
}
Example #22
void __ubgps_gc_callbacks(struct ubgps_s * const gps)
{
  struct gps_callback_entry_s * cb, * cbnext;

  /* Search for callback in queue */

  cb = (struct gps_callback_entry_s *)sq_peek(&gps->callbacks);
  while (cb)
    {
      /* Save next callback entry */

      cbnext = (struct gps_callback_entry_s *)sq_next(&cb->entry);

      if (cb->event_mask == 0)
        {
          /* Free inactive callback. */

          sq_rem(&cb->entry, &gps->callbacks);
          free(cb);
        }

      cb = cbnext;
    }
}
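
Examples #15 and #22 show the deletion-safe variant of the traversal: the pointer to the next node is captured before the current node is processed, so sq_rem() and free() on the current node cannot break the iteration. A minimal sketch under the same hedged NuttX assumptions; struct my_cb_s and prune_inactive() are hypothetical names.

/* Hedged sketch of deletion-safe iteration, as in Examples #15 and #22.
 * struct my_cb_s and prune_inactive() are hypothetical names.
 */

#include <nuttx/config.h>
#include <stdint.h>
#include <stdlib.h>
#include <queue.h>

struct my_cb_s
{
  sq_entry_t entry;     /* Queue entry; first member */
  uint32_t   event_mask;
};

static void prune_inactive(FAR sq_queue_t *q)
{
  FAR struct my_cb_s *cb = (FAR struct my_cb_s *)sq_peek(q);

  while (cb != NULL)
    {
      /* Save the next node before possibly unlinking and freeing this one */

      FAR struct my_cb_s *next =
        (FAR struct my_cb_s *)sq_next(&cb->entry);

      if (cb->event_mask == 0)
        {
          sq_rem(&cb->entry, q);
          free(cb);
        }

      cb = next;
    }
}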
Example #23
static uint16_t send_interrupt(FAR struct uip_driver_s *dev, FAR void *pvconn,
                               FAR void *pvpriv, uint16_t flags)
{
  FAR struct uip_conn *conn = (FAR struct uip_conn*)pvconn;
  FAR struct socket *psock = (FAR struct socket *)pvpriv;

  nllvdbg("flags: %04x\n", flags);

  /* If this packet contains an acknowledgement, then update the count of
   * acknowledged bytes.
   */

  if ((flags & UIP_ACKDATA) != 0)
    {
      FAR sq_entry_t *entry, *next;
      FAR struct uip_wrbuffer_s *segment;
      uint32_t ackno;

      ackno = uip_tcpgetsequence(TCPBUF->ackno);
      for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
        {
          next    = sq_next(entry);
          segment = (FAR struct uip_wrbuffer_s*)entry;

          if (segment->wb_seqno < ackno)
            {
              nllvdbg("ACK: acked=%d buflen=%d ackno=%d\n",
                      segment->wb_seqno, segment->wb_nbytes, ackno);

              /* Segment was ACKed. Remove from ACK waiting queue */

              sq_rem(entry, &conn->unacked_q);

              /* Return the write buffer to the pool of free buffers */

              uip_tcpwrbuffer_release(segment);
            }
        }
    }

  /* Check for a loss of connection */

  else if ((flags & (UIP_CLOSE | UIP_ABORT | UIP_TIMEDOUT)) != 0)
    {
      /* Report not connected */

      nllvdbg("Lost connection\n");
      net_lostconnection(psock, flags);
      goto end_wait;
    }

  /* Check if we are being asked to retransmit data */

  else if ((flags & UIP_REXMIT) != 0)
    {
      sq_entry_t *entry;

      /* Move all segments that have been sent but not ACKed back to the
       * write queue.  Note that the un-ACKed segments are put at the head
       * of the write_q so they can be resent as soon as possible.
       */

      while ((entry = sq_remlast(&conn->unacked_q)))
        {
          struct uip_wrbuffer_s *segment = (struct uip_wrbuffer_s*)entry;

          if (segment->wb_nrtx >= UIP_MAXRTX)
            {
              //conn->unacked -= segment->wb_nbytes;

              /* Return the write buffer */

              uip_tcpwrbuffer_release(segment);

              /* NOTE expired is different from un-ACKed, it is designed to
               * represent the number of segments that have been sent,
               * retransmitted, and un-ACKed, if expired is not zero, the
               * connection will be closed.
               *
               * field expired can only be updated at UIP_ESTABLISHED state
               */

              conn->expired++;
              continue;
            }

          send_insert_seqment(segment, &conn->write_q);
        }
    }

  /* Check if the outgoing packet is available (it may have been claimed
   * by a sendto interrupt serving a different thread).
   */

  if (dev->d_sndlen > 0)
    {
      /* Another thread has beat us sending data, wait for the next poll */

      return flags;
    }

  /* We get here if (1) not all of the data has been ACKed, (2) we have been
   * asked to retransmit data, (3) the connection is still healthy, and (4)
   * the outgoing packet is available for our use.  In this case, we are
   * now free to send more data to receiver -- UNLESS the buffer contains
   * unprocessed incoming data.  In that event, we will have to wait for the
   * next polling cycle.
   */

  if ((conn->tcpstateflags & UIP_ESTABLISHED) &&
      (flags & (UIP_POLL | UIP_REXMIT)) &&
      !(sq_empty(&conn->write_q)))
    {
      /* Check if the destination IP address is in the ARP table.  If not,
       * then the send won't actually make it out... it will be replaced with
       * an ARP request.
       *
       * NOTE 1: This could be an expensive check if there are a lot of
       * entries in the ARP table.
       *
       * NOTE 2: If we are actually harvesting IP addresses on incoming IP
       * packets, then this check should not be necessary; the MAC mapping
       * should already be in the ARP table.
       */

#if defined(CONFIG_NET_ETHERNET) && !defined(CONFIG_NET_ARP_IPIN)
      if (uip_arp_find(conn->ripaddr) != NULL)
#endif
        {
          FAR struct uip_wrbuffer_s *segment;
          FAR void *sndbuf;
          size_t sndlen;

          /* Get the amount of data that we can send in the next packet */

          segment = (FAR struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q);
          if (segment)
            {
              sndbuf = segment->wb_buffer;
              sndlen = segment->wb_nbytes;

              DEBUGASSERT(sndlen <= uip_mss(conn));

              /* REVISIT:  There should be a check here to assure that we do
               * not exceed the window (conn->winsize).
               */

              /* Set the sequence number for this segment.  NOTE: uIP
               * updates sndseq on receipt of ACK *before* this function
               * is called. In that case sndseq will point to the next
               * unacknowledged byte (which might have already been
               * sent). We will overwrite the value of sndseq here
               * before the packet is sent.
               */

              if (segment->wb_nrtx == 0 && segment->wb_seqno == (unsigned)-1)
                {
                  segment->wb_seqno = conn->isn + conn->sent;
                }

              uip_tcpsetsequence(conn->sndseq, segment->wb_seqno);

              /* Then set-up to send that amount of data. (this won't
               * actually happen until the polling cycle completes).
               */

              uip_send(dev, sndbuf, sndlen);

              /* Remember how much data we send out now so that we know
               * when everything has been acknowledged.  Just increment
               * the amount of data sent. This will be needed in
               * sequence number calculations and we know that this is
               * not a re-transmission. Re-transmissions do not go through
               * this path.
               */

              if (segment->wb_nrtx == 0)
                {
                  conn->unacked += sndlen;
                  conn->sent    += sndlen;
                }

              /* Increment the retransmission counter before expiration.
               * NOTE we will not calculate the retransmission timer
               * (RTT) to save CPU cycles; each send_insert_seqment
               * segment will be retransmitted UIP_MAXRTX times at half-
               * second intervals before expiration.
               */

              segment->wb_nrtx++;

              /* The segment is waiting for ACK again */

              send_insert_seqment(segment, &conn->unacked_q);

              /* Only one packet can be sent by the low-level driver at a
               * time, so tell the caller to stop polling the other
               * connections.
               */

              flags &= ~UIP_POLL;
            }
        }
    }

  /* Continue waiting */

  return flags;

end_wait:

  /* Do not allow any further callbacks */

  psock->s_sndcb->flags = 0;
  psock->s_sndcb->event = NULL;

  return flags;
}
Example #24
static uint16_t psock_send_interrupt(FAR struct net_driver_s *dev,
                                     FAR void *pvconn, FAR void *pvpriv,
                                     uint16_t flags)
{
    FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)pvconn;
    FAR struct socket *psock = (FAR struct socket *)pvpriv;

    nllvdbg("flags: %04x\n", flags);

    /* If this packet contains an acknowledgement, then update the count of
     * acknowledged bytes.
     */

    if ((flags & TCP_ACKDATA) != 0)
    {
        FAR struct tcp_wrbuffer_s *wrb;
        FAR sq_entry_t *entry;
        FAR sq_entry_t *next;
        uint32_t ackno;

        ackno = tcp_getsequence(TCPBUF->ackno);
        nllvdbg("ACK: ackno=%u flags=%04x\n", ackno, flags);

        /* Look at every write buffer in the unacked_q.  The unacked_q
         * holds write buffers that have been entirely sent, but which
         * have not yet been ACKed.
         */

        for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
        {
            uint32_t lastseq;

            /* Check of some or all of this write buffer has been ACKed. */

            next = sq_next(entry);
            wrb = (FAR struct tcp_wrbuffer_s*)entry;

            /* If the ACKed sequence number is greater than the start
             * sequence number of the write buffer, then some or all of
             * the write buffer has been ACKed.
             */

            if (ackno > WRB_SEQNO(wrb))
            {
                /* Get the sequence number at the end of the data */

                lastseq = WRB_SEQNO(wrb) + WRB_PKTLEN(wrb);
                nllvdbg("ACK: wrb=%p seqno=%u lastseq=%u pktlen=%u ackno=%u\n",
                        wrb, WRB_SEQNO(wrb), lastseq, WRB_PKTLEN(wrb), ackno);

                /* Has the entire buffer been ACKed? */

                if (ackno >= lastseq)
                {
                    nllvdbg("ACK: wrb=%p Freeing write buffer\n", wrb);

                    /* Yes... Remove the write buffer from ACK waiting queue */

                    sq_rem(entry, &conn->unacked_q);

                    /* And return the write buffer to the pool of free buffers */

                    tcp_wrbuffer_release(wrb);
                }
                else
                {
                    unsigned int trimlen;

                    /* No, then just trim the ACKed bytes from the beginning
                     * of the write buffer.  This will free up some I/O buffers
                     * that can be reused while we are still sending the last
                     * buffers in the chain.
                     */

                    trimlen = ackno - WRB_SEQNO(wrb);
                    if (trimlen > WRB_SENT(wrb))
                    {
                        /* More data has been ACKed than we have sent? */

                        trimlen = WRB_SENT(wrb);
                    }

                    nllvdbg("ACK: wrb=%p trim %u bytes\n", wrb, trimlen);

                    WRB_TRIM(wrb, trimlen);
                    WRB_SEQNO(wrb) = ackno;
                    WRB_SENT(wrb) -= trimlen;

                    /* Set the new sequence number for what remains */

                    nllvdbg("ACK: wrb=%p seqno=%u pktlen=%u\n",
                            wrb, WRB_SEQNO(wrb), WRB_PKTLEN(wrb));
                }
            }
        }

        /* A special case is the head of the write_q which may be partially
         * sent and so can still have un-ACKed bytes that could get ACKed
         * before the entire write buffer has even been sent.
         */

        wrb = (FAR struct tcp_wrbuffer_s*)sq_peek(&conn->write_q);
        if (wrb && WRB_SENT(wrb) > 0 && ackno > WRB_SEQNO(wrb))
        {
            uint32_t nacked;

            /* Number of bytes that were ACKed */

            nacked = ackno - WRB_SEQNO(wrb);
            if (nacked > WRB_SENT(wrb))
            {
                /* More data has been ACKed than we have sent? ASSERT? */

                nacked = WRB_SENT(wrb);
            }

            nllvdbg("ACK: wrb=%p seqno=%u nacked=%u sent=%u ackno=%u\n",
                    wrb, WRB_SEQNO(wrb), nacked, WRB_SENT(wrb), ackno);

            /* Trim the ACKed bytes from the beginning of the write buffer. */

            WRB_TRIM(wrb, nacked);
            WRB_SEQNO(wrb) = ackno;
            WRB_SENT(wrb) -= nacked;

            nllvdbg("ACK: wrb=%p seqno=%u pktlen=%u sent=%u\n",
                    wrb, WRB_SEQNO(wrb), WRB_PKTLEN(wrb), WRB_SENT(wrb));
        }
    }

    /* Check for a loss of connection */

    else if ((flags & (TCP_CLOSE | TCP_ABORT | TCP_TIMEDOUT)) != 0)
    {
        nllvdbg("Lost connection: %04x\n", flags);

        /* Report not connected */

        net_lostconnection(psock, flags);

        /* Free write buffers and terminate polling */

        psock_lost_connection(psock, conn);
        return flags;
    }

    /* Check if we are being asked to retransmit data */

    else if ((flags & TCP_REXMIT) != 0)
    {
        FAR struct tcp_wrbuffer_s *wrb;
        FAR sq_entry_t *entry;

        nllvdbg("REXMIT: %04x\n", flags);

        /* If there is a partially sent write buffer at the head of the
         * write_q?  Has anything been sent from that write buffer?
         */

        wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
        nllvdbg("REXMIT: wrb=%p sent=%u\n", wrb, wrb ? WRB_SENT(wrb) : 0);

        if (wrb != NULL && WRB_SENT(wrb) > 0)
        {
            FAR struct tcp_wrbuffer_s *tmp;
            uint16_t sent;

            /* Yes.. Reset the number of bytes sent from the write buffer */

            sent = WRB_SENT(wrb);
            if (conn->unacked > sent)
            {
                conn->unacked -= sent;
            }
            else
            {
                conn->unacked = 0;
            }

            if (conn->sent > sent)
            {
                conn->sent -= sent;
            }
            else
            {
                conn->sent = 0;
            }

            WRB_SENT(wrb) = 0;
            nllvdbg("REXMIT: wrb=%p sent=%u, conn unacked=%d sent=%d\n",
                    wrb, WRB_SENT(wrb), conn->unacked, conn->sent);

            /* Increment the retransmit count on this write buffer. */

            if (++WRB_NRTX(wrb) >= TCP_MAXRTX)
            {
                nlldbg("Expiring wrb=%p nrtx=%u\n", wrb, WRB_NRTX(wrb));

                /* The maximum retry count has been exhausted. Remove the write
                 * buffer at the head of the queue.
                 */

                tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
                DEBUGASSERT(tmp == wrb);
                UNUSED(tmp);

                /* And return the write buffer to the free list */

                tcp_wrbuffer_release(wrb);

                /* NOTE expired is different from un-ACKed, it is designed to
                 * represent the number of segments that have been sent,
                 * retransmitted, and un-ACKed, if expired is not zero, the
                 * connection will be closed.
                 *
                 * field expired can only be updated at TCP_ESTABLISHED state
                 */

                conn->expired++;
            }
        }

        /* Move all segments that have been sent but not ACKed back to the
         * write queue.  Note that the un-ACKed segments are put at the head
         * of the write_q so they can be resent as soon as possible.
         */

        while ((entry = sq_remlast(&conn->unacked_q)) != NULL)
        {
            wrb = (FAR struct tcp_wrbuffer_s*)entry;
            uint16_t sent;

            /* Reset the number of bytes sent from the write buffer */

            sent = WRB_SENT(wrb);
            if (conn->unacked > sent)
            {
                conn->unacked -= sent;
            }
            else
            {
                conn->unacked = 0;
            }

            if (conn->sent > sent)
            {
                conn->sent -= sent;
            }
            else
            {
                conn->sent = 0;
            }

            WRB_SENT(wrb) = 0;
            nllvdbg("REXMIT: wrb=%p sent=%u, conn unacked=%d sent=%d\n",
                    wrb, WRB_SENT(wrb), conn->unacked, conn->sent);

            /* Free any write buffers that have exceed the retry count */

            if (++WRB_NRTX(wrb) >= TCP_MAXRTX)
            {
                nlldbg("Expiring wrb=%p nrtx=%u\n", wrb, WRB_NRTX(wrb));

                /* Return the write buffer to the free list */

                tcp_wrbuffer_release(wrb);

                /* NOTE expired is different from un-ACKed, it is designed to
                 * represent the number of segments that have been sent,
                 * retransmitted, and un-ACKed, if expired is not zero, the
                 * connection will be closed.
                 *
                 * field expired can only be updated at TCP_ESTABLISHED state
                 */

                conn->expired++;
                continue;
            }
            else
            {
                /* Insert the write buffer into the write_q (in sequence
                 * number order).  The retransmission will occur below
                 * when the write buffer with the lowest sequence number
                 * is pulled from the write_q again.
                 */

                nllvdbg("REXMIT: Moving wrb=%p nrtx=%u\n", wrb, WRB_NRTX(wrb));

                psock_insert_segment(wrb, &conn->write_q);
            }
        }
    }

    /* Check if the outgoing packet is available (it may have been claimed
     * by a sendto interrupt serving a different thread).
     */

    if (dev->d_sndlen > 0)
    {
        /* Another thread has beat us sending data, wait for the next poll */

        return flags;
    }

    /* We get here if (1) not all of the data has been ACKed, (2) we have been
     * asked to retransmit data, (3) the connection is still healthy, and (4)
     * the outgoing packet is available for our use.  In this case, we are
     * now free to send more data to receiver -- UNLESS the buffer contains
     * unprocessed incoming data.  In that event, we will have to wait for the
     * next polling cycle.
     */

    if ((conn->tcpstateflags & TCP_ESTABLISHED) &&
            (flags & (TCP_POLL | TCP_REXMIT)) &&
            !(sq_empty(&conn->write_q)))
    {
        /* Check if the destination IP address is in the ARP table.  If not,
         * then the send won't actually make it out... it will be replaced with
         * an ARP request.
         *
         * NOTE 1: This could be an expensive check if there are a lot of
         * entries in the ARP table.
         *
         * NOTE 2: If we are actually harvesting IP addresses on incoming IP
         * packets, then this check should not be necessary; the MAC mapping
         * should already be in the ARP table in many cases.
         *
         * NOTE 3: If CONFIG_NET_ARP_SEND then we can be assured that the IP
         * address mapping is already in the ARP table.
         */

#if defined(CONFIG_NET_ETHERNET) && !defined(CONFIG_NET_ARP_IPIN) && \
    !defined(CONFIG_NET_ARP_SEND)
        if (arp_find(conn->ripaddr) != NULL)
#endif
        {
            FAR struct tcp_wrbuffer_s *wrb;
            size_t sndlen;

            /* Peek at the head of the write queue (but don't remove anything
             * from the write queue yet).  We know from the above test that
             * the write_q is not empty.
             */

            wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
            DEBUGASSERT(wrb);

            /* Get the amount of data that we can send in the next packet.
             * We will send either the remaining data in the buffer I/O
             * buffer chain, or as much as will fit given the MSS and current
             * window size.
             */

            sndlen = WRB_PKTLEN(wrb) - WRB_SENT(wrb);
            if (sndlen > tcp_mss(conn))
            {
                sndlen = tcp_mss(conn);
            }

            if (sndlen > conn->winsize)
            {
                sndlen = conn->winsize;
            }

            nllvdbg("SEND: wrb=%p pktlen=%u sent=%u sndlen=%u\n",
                    wrb, WRB_PKTLEN(wrb), WRB_SENT(wrb), sndlen);

            /* Set the sequence number for this segment.  If we are
             * retransmitting, then the sequence number will already
             * be set for this write buffer.
             */

            if (WRB_SEQNO(wrb) == (unsigned)-1)
            {
                WRB_SEQNO(wrb) = conn->isn + conn->sent;
            }

            /* The TCP stack updates sndseq on receipt of ACK *before*
             * this function is called. In that case sndseq will point
             * to the next unacknowledged byte (which might have already
             * been sent). We will overwrite the value of sndseq here
             * before the packet is sent.
             */

            tcp_setsequence(conn->sndseq, WRB_SEQNO(wrb) + WRB_SENT(wrb));

            /* Then set-up to send that amount of data with the offset
             * corresponding to the amount of data already sent. (this
             * won't actually happen until the polling cycle completes).
             */

            devif_iob_send(dev, WRB_IOB(wrb), sndlen, WRB_SENT(wrb));

            /* Remember how much data we send out now so that we know
             * when everything has been acknowledged.  Just increment
             * the amount of data sent. This will be needed in sequence
             * number calculations.
             */

            conn->unacked += sndlen;
            conn->sent    += sndlen;

            nllvdbg("SEND: wrb=%p nrtx=%u unacked=%u sent=%u\n",
                    wrb, WRB_NRTX(wrb), conn->unacked, conn->sent);

            /* Increment the count of bytes sent from this write buffer */

            WRB_SENT(wrb) += sndlen;

            nllvdbg("SEND: wrb=%p sent=%u pktlen=%u\n",
                    wrb, WRB_SENT(wrb), WRB_PKTLEN(wrb));

            /* Remove the write buffer from the write queue if the
             * last of the data has been sent from the buffer.
             */

            DEBUGASSERT(WRB_SENT(wrb) <= WRB_PKTLEN(wrb));
            if (WRB_SENT(wrb) >= WRB_PKTLEN(wrb))
            {
                FAR struct tcp_wrbuffer_s *tmp;

                nllvdbg("SEND: wrb=%p Move to unacked_q\n", wrb);

                tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
                DEBUGASSERT(tmp == wrb);
                UNUSED(tmp);

                /* Put the I/O buffer chain in the un-acked queue; the
                 * segment is waiting for ACK again
                 */

                psock_insert_segment(wrb, &conn->unacked_q);
            }

            /* Only one packet can be sent by the low-level driver at a
             * time, so tell the caller to stop polling the other
             * connections.
             */

            flags &= ~TCP_POLL;
        }
    }

    /* Continue waiting */

    return flags;
}