Example #1
int wd_cancel (WDOG_ID wdid)
{
  wdog_t    *curr;
  wdog_t    *prev;
  irqstate_t saved_state;
  int        ret = ERROR;

  /* Prohibit timer interactions with the timer queue until the
   * cancellation is complete
   */

  saved_state = irqsave();

  /* Make sure that the watchdog is initialized (non-NULL) and is still active */

  if (wdid && wdid->active)
    {
      /* Search the g_wdactivelist for the target watchdog.  We can't use sq_rem
       * to do this because there are additional operations that need to be
       * done.
       */

      prev = NULL;
      curr = (wdog_t*)g_wdactivelist.head;

      while((curr) && (curr != wdid))
        {
          prev = curr;
          curr = curr->next;
        }

      /* Check if the watchdog was found in the list.  If not, then an OS
       * error has occurred because the watchdog is marked active!
       */

      if (!curr)
        {
          PANIC(OSERR_WDOGNOTFOUND);
        }
      else
        {
          /* If there is a watchdog in the timer queue after the one that
           * is being canceled, then it inherits the remaining ticks.
           */

          if (curr->next)
            {
              curr->next->lag += curr->lag;
            }

          /* Now, remove the watchdog from the timer queue */

          if (prev)
            {
              (void)sq_remafter((FAR sq_entry_t*)prev, &g_wdactivelist);
            }
          else
            {
              (void)sq_remfirst(&g_wdactivelist);
            }
          wdid->next = NULL;

          /* Return success */

          ret = OK;
        }

      /* Mark the watchdog inactive */

      wdid->active = false;
    }

  irqrestore(saved_state);
  return ret;
}
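
For context, here is a minimal usage sketch of the watchdog API above. It assumes the classic NuttX interface (wd_create(), wd_start(), wd_delete(), the wdentry_t handler type, and the MSEC2TICK() helper); the handler name and delay value are illustrative only.

#include <nuttx/wdog.h>
#include <nuttx/clock.h>

static void example_timeout(int argc, uint32_t arg1)
{
  /* Runs from the timer interrupt when the watchdog expires */
}

static void example(void)
{
  WDOG_ID wdog = wd_create();             /* Allocate a watchdog */

  if (wdog != NULL)
    {
      /* Arm it for ~100 ms with one argument for the handler */

      (void)wd_start(wdog, MSEC2TICK(100), (wdentry_t)example_timeout, 1, 0);

      /* ... if the guarded event completes in time ... */

      (void)wd_cancel(wdog);              /* Stop the timer */
      (void)wd_delete(wdog);              /* Return it to the pool */
    }
}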
Example #2
struct uip_readahead_s *uip_tcpreadaheadalloc(void)
{
  return (struct uip_readahead_s*)sq_remfirst(&g_freebuffers);
}
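
The matching release path (uip_tcpreadaheadrelease(), used in Example #6 below) would simply push the buffer back onto the same free list. A sketch under the same assumptions, with rh_node taken to be the buffer's queue entry as in the later examples:

void uip_tcpreadaheadrelease(FAR struct uip_readahead_s *readahead)
{
  /* Return the read-ahead buffer to the global pool */

  sq_addlast(&readahead->rh_node, &g_freebuffers);
}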
Example #3
static int
task_main(int argc, char *argv[])
{
	work_q_item_t *work;

	/* Initialize global variables */
	g_key_offsets[0] = 0;

	for (unsigned i = 0; i < (DM_KEY_NUM_KEYS - 1); i++)
		g_key_offsets[i + 1] = g_key_offsets[i] + (g_per_item_max_index[i] * k_sector_size);

	unsigned max_offset = g_key_offsets[DM_KEY_NUM_KEYS - 1] + (g_per_item_max_index[DM_KEY_NUM_KEYS - 1] * k_sector_size);

	for (unsigned i = 0; i < dm_number_of_funcs; i++)
		g_func_counts[i] = 0;

	/* Initialize the item-type locks; for now only DM_KEY_MISSION_STATE supports locking */
	sem_init(&g_sys_state_mutex, 1, 1); /* Initially unlocked */
	for (unsigned i = 0; i < DM_KEY_NUM_KEYS; i++)
		g_item_locks[i] = NULL;
	g_item_locks[DM_KEY_MISSION_STATE] = &g_sys_state_mutex;

	g_task_should_exit = false;

	init_q(&g_work_q);
	init_q(&g_free_q);

	sem_init(&g_work_queued_sema, 1, 0);

	/* See if the data manager file exists and its size is a multiple of the sector size */
	g_task_fd = open(k_data_manager_device_path, O_RDONLY | O_BINARY);
	if (g_task_fd >= 0) {
		/* File exists, check its size */
		int file_size = lseek(g_task_fd, 0, SEEK_END);
		if ((file_size % k_sector_size) != 0) {
			warnx("Incompatible data manager file %s, resetting it", k_data_manager_device_path);
			close(g_task_fd);
			unlink(k_data_manager_device_path);
		}
		else
			close(g_task_fd);
	}

	/* Open or create the data manager file */
	g_task_fd = open(k_data_manager_device_path, O_RDWR | O_CREAT | O_BINARY);

	if (g_task_fd < 0) {
		warnx("Could not open data manager file %s", k_data_manager_device_path);
		sem_post(&g_init_sema); /* Don't want to hang startup */
		return -1;
	}

	if ((unsigned)lseek(g_task_fd, max_offset, SEEK_SET) != max_offset) {
		close(g_task_fd);
		warnx("Could not seek data manager file %s", k_data_manager_device_path);
		sem_post(&g_init_sema); /* Don't want to hang startup */
		return -1;
	}

	fsync(g_task_fd);

	printf("dataman: ");
	/* see if we need to erase any items based on restart type */
	int sys_restart_val;
	if (param_get(param_find("SYS_RESTART_TYPE"), &sys_restart_val) == OK) {
		if (sys_restart_val == DM_INIT_REASON_POWER_ON) {
			printf("Power on restart");
			_restart(DM_INIT_REASON_POWER_ON);
		} else if (sys_restart_val == DM_INIT_REASON_IN_FLIGHT) {
			printf("In flight restart");
			_restart(DM_INIT_REASON_IN_FLIGHT);
		} else {
			printf("Unknown restart");
		}
	} else {
		printf("Unknown restart");
	}

	/* We use two file descriptors, one for the caller context and one for the worker thread. */
	/* They are actually the same, but we need some way to reject caller requests while the */
	/* worker thread is shutting down but still processing requests. */
	g_fd = g_task_fd;

	printf(", data manager file '%s' size is %d bytes\n", k_data_manager_device_path, max_offset);

	/* Tell startup that the worker thread has completed its initialization */
	sem_post(&g_init_sema);

	/* Start the endless loop, waiting for then processing work requests */
	while (true) {

		/* Do we need to exit? */
		if ((g_task_should_exit) && (g_fd >= 0)) {
			/* Close the file handle to stop further queuing */
			g_fd = -1;
		}

		if (!g_task_should_exit) {
			/* wait for work */
			sem_wait(&g_work_queued_sema);
		}

		/* Empty the work queue */
		while ((work = dequeue_work_item())) {

			/* handle each work item with the appropriate handler */
			switch (work->func) {
			case dm_write_func:
				g_func_counts[dm_write_func]++;
				work->result =
					_write(work->write_params.item, work->write_params.index, work->write_params.persistence, work->write_params.buf, work->write_params.count);
				break;

			case dm_read_func:
				g_func_counts[dm_read_func]++;
				work->result =
					_read(work->read_params.item, work->read_params.index, work->read_params.buf, work->read_params.count);
				break;

			case dm_clear_func:
				g_func_counts[dm_clear_func]++;
				work->result = _clear(work->clear_params.item);
				break;

			case dm_restart_func:
				g_func_counts[dm_restart_func]++;
				work->result = _restart(work->restart_params.reason);
				break;

			default: /* should never happen */
				work->result = -1;
				break;
			}

			/* Inform the caller that work is done */
			sem_post(&work->wait_sem);
		}

		/* Time to go? */
		if ((g_task_should_exit) && (g_fd < 0))
			break;
	}

	close(g_task_fd);
	g_task_fd = -1;

	/* The work queue is now empty, empty the free queue */
	for (;;) {
		if ((work = (work_q_item_t *)sq_remfirst(&(g_free_q.q))) == NULL)
			break;
		if (work->first)
			free(work);
	}

	destroy_q(&g_work_q);
	destroy_q(&g_free_q);
	sem_destroy(&g_work_queued_sema);
	sem_destroy(&g_sys_state_mutex);

	return 0;
}
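
The worker loop above relies on dequeue_work_item(), which is not shown. A plausible sketch under the same queue types, assuming hypothetical lock_queue()/unlock_queue() helpers that serialize access to the work queue:

static work_q_item_t *
dequeue_work_item(void)
{
	work_q_item_t *work;

	/* Pop the oldest queued request, if any, under the queue lock */
	lock_queue(&g_work_q);

	if ((work = (work_q_item_t *)sq_remfirst(&g_work_q.q)))
		g_work_q.size--;

	unlock_queue(&g_work_q);
	return work;
}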
Example #4

static void usbmsc_unbind(FAR struct usbdevclass_driver_s *driver,
                          FAR struct usbdev_s *dev)
{
  FAR struct usbmsc_dev_s *priv;
  FAR struct usbmsc_req_s *reqcontainer;
  irqstate_t flags;
  int i;

  usbtrace(TRACE_CLASSUNBIND, 0);

#ifdef CONFIG_DEBUG
  if (!driver || !dev || !dev->ep0)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_UNBINDINVALIDARGS), 0);
      return;
    }
#endif

  /* Extract reference to private data */

  priv = ((FAR struct usbmsc_driver_s*)driver)->dev;

#ifdef CONFIG_DEBUG
  if (!priv)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_EP0NOTBOUND1), 0);
      return;
    }
#endif

  /* The worker thread should have already been stopped by the
   * driver un-initialize logic.
   */

  DEBUGASSERT(priv->thstate == USBMSC_STATE_TERMINATED);

  /* Make sure that we are not already unbound */

  if (priv != NULL)
    {
      /* Make sure that the endpoints have been unconfigured.  If
       * we were terminated gracefully, then the configuration should
       * already have been reset.  If not, then calling usbmsc_resetconfig
       * should cause the endpoints to immediately terminate all
       * transfers and return the requests to us (with result == -ESHUTDOWN)
       */

      usbmsc_resetconfig(priv);
      up_mdelay(50);

      /* Free the pre-allocated control request */

      if (priv->ctrlreq != NULL)
        {
          usbmsc_freereq(dev->ep0, priv->ctrlreq);
          priv->ctrlreq = NULL;
        }

      /* Free pre-allocated read requests (which should all have
       * been returned to the free list at this time -- we don't check)
       */

      for (i = 0; i < CONFIG_USBMSC_NRDREQS; i++)
        {
          reqcontainer = &priv->rdreqs[i];
          if (reqcontainer->req)
            {
              usbmsc_freereq(priv->epbulkout, reqcontainer->req);
              reqcontainer->req = NULL;
            }
        }

      /* Free the bulk OUT endpoint */

      if (priv->epbulkout)
        {
          DEV_FREEEP(dev, priv->epbulkout);
          priv->epbulkout = NULL;
        }

      /* Free write requests that are not in use (which should be all
       * of them).
       */

      flags = irqsave();
      while (!sq_empty(&priv->wrreqlist))
        {
          reqcontainer = (struct usbmsc_req_s *)sq_remfirst(&priv->wrreqlist);
          if (reqcontainer->req != NULL)
            {
              usbmsc_freereq(priv->epbulkin, reqcontainer->req);
            }
        }

      /* Free the bulk IN endpoint */

      if (priv->epbulkin)
        {
          DEV_FREEEP(dev, priv->epbulkin);
          priv->epbulkin = NULL;
        }

      irqrestore(flags);
    }
}
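
The wrreqlist loop above is an instance of a common NuttX idiom: drain a singly-linked free list with interrupts masked so that an ISR cannot touch the list mid-removal. A generic sketch of the pattern, assuming only the sq_* queue API:

static void drain_queue(FAR sq_queue_t *queue,
                        void (*release)(FAR sq_entry_t *entry))
{
  FAR sq_entry_t *entry;
  irqstate_t flags;

  /* Keep interrupt handlers away from the list while we empty it */

  flags = irqsave();
  while ((entry = sq_remfirst(queue)) != NULL)
    {
      release(entry);
    }

  irqrestore(flags);
}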
Example #5
static uint16_t send_interrupt(FAR struct uip_driver_s *dev, FAR void *pvconn,
                               FAR void *pvpriv, uint16_t flags)
{
  FAR struct uip_conn *conn = (FAR struct uip_conn*)pvconn;
  FAR struct socket *psock = (FAR struct socket *)pvpriv;

  nllvdbg("flags: %04x\n", flags);

  /* If this packet contains an acknowledgement, then update the count of
   * acknowledged bytes.
   */

  if ((flags & UIP_ACKDATA) != 0)
    {
      FAR sq_entry_t *entry, *next;
      FAR struct uip_wrbuffer_s *segment;
      uint32_t ackno;

      ackno = uip_tcpgetsequence(TCPBUF->ackno);
      for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
        {
          next    = sq_next(entry);
          segment = (FAR struct uip_wrbuffer_s*)entry;

          if (segment->wb_seqno < ackno)
            {
              nllvdbg("ACK: acked=%d buflen=%d ackno=%d\n",
                      segment->wb_seqno, segment->wb_nbytes, ackno);

              /* Segment was ACKed. Remove from ACK waiting queue */

              sq_rem(entry, &conn->unacked_q);

              /* Return the write buffer to the pool of free buffers */

              uip_tcpwrbuffer_release(segment);
            }
        }
    }

  /* Check for a loss of connection */

  else if ((flags & (UIP_CLOSE | UIP_ABORT | UIP_TIMEDOUT)) != 0)
    {
      /* Report not connected */

       nllvdbg("Lost connection\n");
       net_lostconnection(psock, flags);
       goto end_wait;
     }

   /* Check if we are being asked to retransmit data */

   else if ((flags & UIP_REXMIT) != 0)
    {
      sq_entry_t *entry;

      /* Put all segments that have been sent but not yet ACKed back into
       * the write queue.  Note that each un-ACKed segment is placed at the
       * front of write_q so that it can be resent as soon as possible.
       */

      while ((entry = sq_remlast(&conn->unacked_q)))
        {
          struct uip_wrbuffer_s *segment = (struct uip_wrbuffer_s*)entry;

          if (segment->wb_nrtx >= UIP_MAXRTX)
            {
              //conn->unacked -= segment->wb_nbytes;

              /* Return the write buffer */

              uip_tcpwrbuffer_release(segment);

              /* NOTE: 'expired' is different from un-ACKed; it represents
               * the number of segments that have been sent, retransmitted,
               * and are still un-ACKed.  If 'expired' is non-zero, the
               * connection will be closed.
               *
               * The 'expired' field can only be updated in the
               * UIP_ESTABLISHED state.
               */

              conn->expired++;
              continue;
            }

          send_insert_seqment(segment, &conn->write_q);
        }
    }

  /* Check if the outgoing packet is available (it may have been claimed
   * by a sendto interrupt serving a different thread).
   */

  if (dev->d_sndlen > 0)
    {
      /* Another thread beat us to sending data; wait for the next poll */

      return flags;
    }

  /* We get here if (1) not all of the data has been ACKed, (2) we have been
   * asked to retransmit data, (3) the connection is still healthy, and (4)
   * the outgoing packet is available for our use.  In this case, we are
   * now free to send more data to the receiver -- UNLESS the buffer contains
   * unprocessed incoming data.  In that event, we will have to wait for the
   * next polling cycle.
   */

  if ((conn->tcpstateflags & UIP_ESTABLISHED) &&
      (flags & (UIP_POLL | UIP_REXMIT)) &&
      !(sq_empty(&conn->write_q)))
    {
      /* Check if the destination IP address is in the ARP table.  If not,
       * then the send won't actually make it out... it will be replaced with
       * an ARP request.
       *
       * NOTE 1: This could be an expensive check if there are a lot of
       * entries in the ARP table.
       *
       * NOTE 2: If we are actually harvesting IP addresses on incoming IP
       * packets, then this check should not be necessary; the MAC mapping
       * should already be in the ARP table.
       */

#if defined(CONFIG_NET_ETHERNET) && !defined(CONFIG_NET_ARP_IPIN)
      if (uip_arp_find(conn->ripaddr) != NULL)
#endif
        {
          FAR struct uip_wrbuffer_s *segment;
          FAR void *sndbuf;
          size_t sndlen;

          /* Get the amount of data that we can send in the next packet */

          segment = (FAR struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q);
          if (segment)
            {
              sndbuf = segment->wb_buffer;
              sndlen = segment->wb_nbytes;

              DEBUGASSERT(sndlen <= uip_mss(conn));

              /* REVISIT:  There should be a check here to ensure that we do
               * not exceed the window (conn->winsize).
               */

              /* Set the sequence number for this segment.  NOTE: uIP
               * updates sndseq on receipt of ACK *before* this function
               * is called. In that case sndseq will point to the next
               * unacknowledged byte (which might have already been
               * sent). We will overwrite the value of sndseq here
               * before the packet is sent.
               */

              if (segment->wb_nrtx == 0 && segment->wb_seqno == (unsigned)-1)
                {
                  segment->wb_seqno = conn->isn + conn->sent;
                }

              uip_tcpsetsequence(conn->sndseq, segment->wb_seqno);

              /* Then set up to send that amount of data (this won't
               * actually happen until the polling cycle completes).
               */

              uip_send(dev, sndbuf, sndlen);

              /* Remember how much data we send out now so that we know
               * when everything has been acknowledged.  Just increment
               * the amount of data sent. This will be needed in
               * sequence number calculations and we know that this is
               * not a re-transmission. Re-transmissions do not go through
               * this path.
               */

              if (segment->wb_nrtx == 0)
                {
                  conn->unacked += sndlen;
                  conn->sent    += sndlen;
                }

              /* Increment the retransmission counter before expiration.
               * NOTE: we do not calculate the retransmission timer (RTT)
               * in order to save CPU cycles; each segment queued via
               * send_insert_seqment will be retransmitted UIP_MAXRTX times
               * at half-second intervals before expiring.
               */

              segment->wb_nrtx++;

              /* The segment is waiting for ACK again */

              send_insert_seqment(segment, &conn->unacked_q);

              /* Only one packet can be sent by the low-level driver at a
               * time, so tell the caller to stop polling the other
               * connections.
               */

              flags &= ~UIP_POLL;
            }
        }
    }

  /* Continue waiting */

  return flags;

end_wait:

  /* Do not allow any further callbacks */

  psock->s_sndcb->flags = 0;
  psock->s_sndcb->event = NULL;

  return flags;
}
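
send_insert_seqment() itself is not shown above. A hypothetical sketch consistent with its use here keeps the queue ordered by wb_seqno, so the segment with the lowest sequence number is always at the head; the implementation details are assumptions:

static void send_insert_seqment(FAR struct uip_wrbuffer_s *segment,
                                FAR sq_queue_t *q)
{
  FAR sq_entry_t *insert = NULL;
  FAR sq_entry_t *itr;

  /* Find the last entry whose sequence number precedes the new segment */

  for (itr = sq_peek(q); itr; itr = sq_next(itr))
    {
      FAR struct uip_wrbuffer_s *seg = (FAR struct uip_wrbuffer_s *)itr;

      if (seg->wb_seqno < segment->wb_seqno)
        {
          insert = itr;
        }
      else
        {
          break;
        }
    }

  /* Insert after that entry, or at the head if nothing precedes it */

  if (insert)
    {
      sq_addafter(insert, (FAR sq_entry_t *)segment, q);
    }
  else
    {
      sq_addfirst((FAR sq_entry_t *)segment, q);
    }
}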
Example #6

static inline void recvfrom_readahead(struct recvfrom_s *pstate)
{
    FAR struct uip_conn        *conn = (FAR struct uip_conn *)pstate->rf_sock->s_conn;
    FAR struct uip_readahead_s *readahead;
    size_t                      recvlen;

    /* Check if there is any TCP data already buffered in a read-ahead
     * buffer.
     */

    do
    {
        /* Get the read-ahead buffer at the head of the list (if any) */

        readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead);
        if (readahead)
        {
            /* We have a new buffer... transfer that buffered data into
             * the user buffer.
             *
             * First, get the length of the data to transfer.
             */

            if (readahead->rh_nbytes > pstate->rf_buflen)
            {
                recvlen = pstate->rf_buflen;
            }
            else
            {
                recvlen = readahead->rh_nbytes;
            }

            if (recvlen > 0)
            {
                /* Copy the read-ahead data into the user buffer */

                memcpy(pstate->rf_buffer, readahead->rh_buffer, recvlen);
                nllvdbg("Received %d bytes (of %d)\n", recvlen, readahead->rh_nbytes);

                /* Update the accumulated size of the data read */

                pstate->rf_recvlen += recvlen;
                pstate->rf_buffer  += recvlen;
                pstate->rf_buflen  -= recvlen;
            }

            /* If the read-ahead buffer is empty, then release it.  If not, then
             * we will have to move the data down and return the buffer to the
             * front of the list.
             */

            if (recvlen < readahead->rh_nbytes)
            {
                readahead->rh_nbytes -= recvlen;

                /* The regions can overlap, so use memmove(), not memcpy() */

                memmove(readahead->rh_buffer, &readahead->rh_buffer[recvlen],
                        readahead->rh_nbytes);
                sq_addfirst(&readahead->rh_node, &conn->readahead);
            }
            else
            {
                uip_tcpreadaheadrelease(readahead);
            }
        }
    }
    while (readahead && pstate->rf_buflen > 0);
}
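
On the producing side (not shown), data that arrives while no recv() is pending would be copied into a buffer from the pool in Example #2 and appended to conn->readahead. A hypothetical sketch; the helper name, the MIN() macro, and the CONFIG_NET_TCP_READAHEAD_BUFSIZE bound are assumptions:

static void tcp_queue_readahead(FAR struct uip_conn *conn,
                                FAR const uint8_t *buf, size_t nbytes)
{
  FAR struct uip_readahead_s *readahead = uip_tcpreadaheadalloc();

  if (readahead != NULL)
    {
      /* Copy what fits and queue the buffer for a later recvfrom() */

      readahead->rh_nbytes = MIN(nbytes, CONFIG_NET_TCP_READAHEAD_BUFSIZE);
      memcpy(readahead->rh_buffer, buf, readahead->rh_nbytes);
      sq_addlast(&readahead->rh_node, &conn->readahead);
    }
}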
Example #7
FAR struct mqueue_msg_s *mq_waitreceive(mqd_t mqdes)
{
  FAR struct tcb_s *rtcb;
  FAR struct mqueue_inode_s *msgq;
  FAR struct mqueue_msg_s *rcvmsg;

  /* Get a pointer to the message queue */

  msgq = mqdes->msgq;

  /* Get the message from the head of the queue */

  while ((rcvmsg = (FAR struct mqueue_msg_s *)sq_remfirst(&msgq->msglist)) == NULL)
    {
      /* The queue is empty!  Should we block until the above condition
       * has been satisfied?
       */

      if ((mqdes->oflags & O_NONBLOCK) == 0)
        {
          /* Yes.. Block and try again */

          rtcb = (FAR struct tcb_s *)g_readytorun.head;
          rtcb->msgwaitq = msgq;
          msgq->nwaitnotempty++;

          set_errno(OK);
          up_block_task(rtcb, TSTATE_WAIT_MQNOTEMPTY);

          /* When we resume at this point, either (1) the message queue
           * is no longer empty, or (2) the wait has been interrupted by
           * a signal.  We can detect the latter case by examining the
           * errno value (should be either EINTR or ETIMEDOUT).
           */

          if (get_errno() != OK)
            {
              break;
            }
        }
      else
        {
          /* The queue was empty, and the O_NONBLOCK flag was set for the
           * message queue description referred to by 'mqdes'.
           */

          set_errno(EAGAIN);
          break;
        }
    }

  /* If we got a message, then decrement the number of messages in
   * the queue while we are still in the critical section.
   */

  if (rcvmsg)
    {
      msgq->nmsgs--;
    }

  return rcvmsg;
}
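
mq_waitreceive() manipulates the queue without taking any lock of its own, so it is presumably entered with interrupts already disabled. A minimal caller sketch in that spirit; the msglen and mail fields, and the omitted return of the container to its pool, are assumptions:

static ssize_t example_receive(mqd_t mqdes, FAR char *buffer, size_t buflen)
{
  FAR struct mqueue_msg_s *msg;
  irqstate_t saved_state;
  ssize_t nbytes = ERROR;

  saved_state = irqsave();      /* The queue is also touched from ISRs */

  msg = mq_waitreceive(mqdes);
  if (msg != NULL)
    {
      /* Copy the payload out; the container would then be returned to
       * its message pool (omitted here).
       */

      nbytes = msg->msglen;
      if (nbytes > (ssize_t)buflen)
        {
          nbytes = buflen;
        }

      memcpy(buffer, msg->mail, nbytes);
    }

  irqrestore(saved_state);
  return nbytes;
}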
Example #8
void tcp_free(FAR struct tcp_conn_s *conn)
{
  FAR struct devif_callback_s *cb;
  FAR struct devif_callback_s *next;
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  FAR struct tcp_wrbuffer_s *wrbuffer;
#endif
  net_lock_t flags;

  /* Because g_free_tcp_connections is accessed from both user-level and
   * interrupt-level code, it is necessary to keep interrupts disabled
   * during this operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = net_lock();

  /* Free the remaining callbacks; in practice, only the close callback
   * should be left.
   */

  for (cb = conn->list; cb; cb = next)
    {
      next = cb->flink;
      tcp_callback_free(conn, cb);
    }

  /* TCP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != TCP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

#ifdef CONFIG_NET_TCP_READAHEAD
  /* Release any read-ahead buffers attached to the connection */

  iob_free_queue(&conn->readahead);
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Release any write buffers attached to the connection */

  while ((wrbuffer = (struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q)) != NULL)
    {
      tcp_wrbuffer_release(wrbuffer);
    }

  while ((wrbuffer = (struct tcp_wrbuffer_s *)sq_remfirst(&conn->unacked_q)) != NULL)
    {
      tcp_wrbuffer_release(wrbuffer);
    }
#endif

#ifdef CONFIG_NET_TCPBACKLOG
  /* Remove any backlog attached to this connection */

  if (conn->backlog)
    {
      tcp_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      tcp_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = TCP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  net_unlock(flags);
}
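
The allocation path would be the mirror image of the free above: pop a container off g_free_tcp_connections under the same network lock. A simplified sketch, using the list, lock, and fields seen in tcp_free(); the function name is illustrative, and a real allocator may also recycle connections in TIME_WAIT and similar states:

FAR struct tcp_conn_s *tcp_alloc_sketch(void)
{
  FAR struct tcp_conn_s *conn;
  net_lock_t flags = net_lock();

  /* Take the first free container, if any, and mark it allocated */

  conn = (FAR struct tcp_conn_s *)dq_remfirst(&g_free_tcp_connections);
  if (conn != NULL)
    {
      conn->tcpstateflags = TCP_ALLOCATED;
      conn->crefs         = 0;
    }

  net_unlock(flags);
  return conn;
}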