Ejemplo n.º 1
0
/*
 * Start periodic accelerometer data reporting.
 *
 * Cancels any report work already queued, then schedules the first run
 * of accel_data_report_worker() immediately on the low-priority work
 * queue and bumps the transaction count.
 *
 * Returns -EINVAL for a bad/unbound device, OK otherwise.
 */
static int sensor_accel_op_start_reporting(struct device *dev,
    uint8_t sensor_id, uint64_t sampling_period, uint64_t max_report_latency)
{
    struct sensor_accel_info *info;

    gb_debug("%s:\n",__func__);

    if (!dev || !device_get_private(dev))
        return -EINVAL;

    info = device_get_private(dev);

    /* stop any in-flight report work before (re)starting */
    if (!work_available(&info->data_report_work))
        work_cancel(LPWORK, &info->data_report_work);

    /* reset the simulated sample counter */
    data = 1000;

    /* kick off the reporting worker right away if nothing is queued */
    if (work_available(&info->data_report_work))
        work_queue(LPWORK, &info->data_report_work,
                   accel_data_report_worker, info, 0);

    atomic_inc(&txn);

    return OK;
}
Ejemplo n.º 2
0
/*
 * accel_data_report_worker - LPWORK handler that emits one accelerometer
 * report and re-arms itself.
 *
 * arg is the driver's sensor_accel_info.  If a callback is registered, a
 * report_info_data buffer is allocated, filled with one fabricated sample
 * triple (from the file-global 'data' counter) plus the current RTC time,
 * handed to the callback, and freed.  The worker then re-queues itself to
 * run again in 1200 ms.
 *
 * NOTE(review): payload_size is computed from PRESSURE_READING_NUM inside
 * this accel driver — looks copy/pasted from a pressure sensor driver;
 * confirm the constant is the intended one.
 */
static void accel_data_report_worker(void *arg)
{
    struct sensor_accel_info *info;
    struct report_info *rinfo;
    struct report_info_data *rinfo_data;
    struct sensor_event_data *event_data;
    struct timespec ts;
    uint16_t payload_size;

    payload_size = (PRESSURE_READING_NUM * sizeof(struct sensor_event_data))
                    + (REPORTING_SENSORS * sizeof(struct report_info));

    info = arg;
    if (info->callback) {
        rinfo_data = malloc(sizeof(struct report_info_data) + payload_size);
        if (!rinfo_data)
            goto out;   /* allocation failed: skip report, still re-arm */
        rinfo_data->num_sensors_reporting = REPORTING_SENSORS;
        rinfo = rinfo_data->reportinfo;
        rinfo->id = info->sensor_id;
        rinfo->flags = 0;
        event_data = (struct sensor_event_data *)&rinfo->data_payload[0];

        up_rtc_gettime(&ts);
        rinfo->reference_time = timespec_to_nsec(&ts);
        /* Cast explicitly: tv_sec/tv_nsec are wider types than the %u
         * conversion expects, and a printf argument/specifier mismatch
         * is undefined behavior. */
        gb_debug("[%u.%03u]\n", (unsigned int)ts.tv_sec,
                 (unsigned int)(ts.tv_nsec / 1000000));
#ifdef BATCH_PROCESS_ENABLED
        /*
         * Batch sensor data values and its time_deltas
         * until max fifo event count
        */
#else
        /* Single sensor event data */
        rinfo->readings = PRESSURE_READING_NUM;
        event_data->time_delta = 0;
        event_data->data_value[0] = data++;
        event_data->data_value[1] = data++;
        event_data->data_value[2] = data++;
#endif
        gb_debug("report sensor: %d\n", rinfo->id);
        info->callback(info->sensor_id, rinfo_data, payload_size);

        free(rinfo_data);
    }

out:

     /* cancel any work and reset ourselves */
    if (!work_available(&info->data_report_work))
        work_cancel(LPWORK, &info->data_report_work);

    /* if not already scheduled, schedule the next report in 1200 ms */
    if (work_available(&info->data_report_work))
        work_queue(LPWORK, &info->data_report_work, accel_data_report_worker, info, MSEC2TICK(1200));
}
Ejemplo n.º 3
0
/*
 * This serves as both the GPIO IRQ handler and the debounce handler. When
 * called as the debounce handler, IRQs are probably not already disabled when
 * entering this function.
 */
/*
 * GPIO IRQ handler for the Ara key.  Also re-entered as the debounce
 * handler, in which case interrupts may not already be masked — hence
 * the explicit irqsave() here.
 */
static int ara_key_irqhandler(int irq, void *context)
{
    struct ara_key_context *key = &the_ara_key;
    irqstate_t flags;
    bool value;
    bool active;

    flags = irqsave();

    value = !!gpio_get_value(key->db.gpio);
    active = (value == key->rising_edge);

    dbg_insane("ara key press value: %u active: %u\n", value, active);

    if (debounce_gpio(&key->db, active)) {
        dbg_insane("ara key press value: %u active: %u (stable)\n",
                   value, active);

        /* drop any pending work so the latest 'active' value wins */
        if (!work_available(&key->irq_work))
            work_cancel(HPWORK, &key->irq_work);

        work_queue(HPWORK, &key->irq_work, ara_key_irqworker,
                   (void*)(uintptr_t)active, 0);
    }

    irqrestore(flags);

    return OK;
}
Ejemplo n.º 4
0
/****************************************************************************
 * Name: adc_interrupt
 *
 * Description:
 *   Common ADC interrupt handler.
 *
 * Input Parameters:
 *
 * Returned Value:
 *
 ****************************************************************************/
/****************************************************************************
 * Name: adc_interrupt
 *
 * Description:
 *   Common ADC interrupt handler: acknowledges a pending conversion
 *   interrupt and defers processing to adc_conversion() on the
 *   low-priority work queue.
 *
 * Input Parameters:
 *   irq     - IRQ number (unused)
 *   context - interrupt context (unused)
 *   arg     - the s5j_dev_s instance that raised the interrupt
 *
 * Returned Value:
 *   Always OK.
 *
 ****************************************************************************/
static int adc_interrupt(int irq, FAR void *context, void *arg)
{
	FAR struct s5j_dev_s *priv = (FAR struct s5j_dev_s *)arg;
	int ret;

	if ((getreg32(S5J_ADC_INT_STATUS) & ADC_INT_STATUS_PENDING) == 0) {
		return OK;
	}

	/* Acknowledge the pending interrupt */
	putreg32(ADC_INT_STATUS_PENDING, S5J_ADC_INT_STATUS);

	/*
	 * If the work item is still pending, interrupt processing is
	 * already in the pipeline and nothing more is needed.
	 */
	if (work_available(&priv->work)) {
		ret = work_queue(LPWORK, &priv->work, adc_conversion,
						priv, 0);
		if (ret != 0) {
			lldbg("ERROR: failed to queue work: %d\n", ret);
		}
	}

	return OK;
}
Ejemplo n.º 5
0
/* Watchdog timeout handler: if a touch is still in progress (pen down
 * or moving), hand processing off to stmpe811_timeoutworker() on the
 * high-priority work queue — unless work is already pending.
 */
static void stmpe811_timeout(int argc, uint32_t arg1, ...)
{
  FAR struct stmpe811_dev_s *priv = (FAR struct stmpe811_dev_s *)((uintptr_t)arg1);
  bool touching;
  int ret;

  touching = (priv->sample.contact == CONTACT_DOWN ||
              priv->sample.contact == CONTACT_MOVE);

  if (touching && work_available(&priv->timeout))
    {
      /* Defer to the worker thread.  STMPE811 interrupts stay disabled
       * while the work is pending, so no special action is required to
       * protect the work queue.
       */

      ret = work_queue(HPWORK, &priv->timeout, stmpe811_timeoutworker, priv, 0);
      if (ret != 0)
        {
          ierr("ERROR: Failed to queue work: %d\n", ret);
        }
    }
}
Ejemplo n.º 6
0
/* Periodic poll timer expiry.  Under CONFIG_NET_NOINTS the poll runs on
 * the worker thread; otherwise it is processed inline.
 */
static void skel_poll_expiry(int argc, wdparm_t arg, ...)
{
  FAR struct skel_driver_s *priv = (FAR struct skel_driver_s *)arg;

#ifdef CONFIG_NET_NOINTS
  if (!work_available(&priv->sk_work))
    {
      /* Work structure busy with pending interrupt actions: miss one
       * polling cycle and just re-arm the watchdog.
       */

      (void)wd_start(priv->sk_txpoll, skeleton_WDDELAY, skel_poll_expiry, 1, arg);
    }
  else
    {
      /* Run the poll processing on the worker thread */

      work_queue(HPWORK, &priv->sk_work, skel_poll_work, priv, 0);
    }

#else
  /* Process the poll inline, now */

  skel_poll_process(priv);
#endif
}
Ejemplo n.º 7
0
/* TX-available notification.  Under CONFIG_NET_NOINTS the poll is
 * serialized on the worker thread (and dropped if the single work
 * structure is busy); otherwise it runs inline with IRQs masked.
 */
static int skel_txavail(FAR struct net_driver_s *dev)
{
  FAR struct skel_driver_s *priv = (FAR struct skel_driver_s *)dev->d_private;

#ifdef CONFIG_NET_NOINTS
  /* Only act when the work structure is free; pending interrupt work
   * takes precedence and this availability event is then ignored.
   */

  if (work_available(&priv->sk_work))
    {
      work_queue(HPWORK, &priv->sk_work, skel_txavail_work, priv, 0);
    }

#else
  irqstate_t flags;

  /* May be called from interrupt level processing, so mask interrupts
   * around the out-of-cycle poll.
   */

  flags = irqsave();
  skel_txavail_process(priv);
  irqrestore(flags);
#endif

  return OK;
}
Ejemplo n.º 8
0
/* GPIO pad-detect interrupt: record PM activity and defer the handling
 * to gpio_pad_detect_worker() on the high-priority work queue.
 */
static int gpio_pad_detect_isr(int irq, void *context)
{
    pm_activity(PM_ACTIVITY);

    /* skip scheduling when a previous detect is still being processed */
    if (work_available(&g_info->work)) {
        work_queue(HPWORK, &g_info->work, gpio_pad_detect_worker, NULL, 0);
    }

    return OK;
}
Ejemplo n.º 9
0
/* schedule/cancel longpress event as needed */
static void ara_key_longpress_update(struct ara_key_context *key, bool active)
{
    irqstate_t flags;

    flags = irqsave();
    if (active) {
	/* if not already scheduled, schedule the longpress event */
        if (work_available(&key->longpress_work))
            work_queue(HPWORK, &key->longpress_work,
		    ara_key_longpress_worker, key,
		    MSEC2TICK(ARA_KEY_LONGPRESS_TIME_MS));
    } else {
	/* if key is released, cancel any pending longpress events */
        if (!work_available(&key->longpress_work))
            work_cancel(HPWORK, &key->longpress_work);
    }

    irqrestore(flags);
}
Ejemplo n.º 10
0
/* STMPE811 interrupt handler: locate the device instance for this IRQ,
 * mask further interrupts, schedule stmpe811_worker() on the HP work
 * queue (unless already pending), and acknowledge the interrupt.
 */
static int stmpe811_interrupt(int irq, FAR void *context)
{
  FAR struct stmpe811_dev_s    *priv;
  FAR struct stmpe811_config_s *config;
  int                          ret;

  /* Map the IRQ back to the STMPE811 instance that raised it */

#ifndef CONFIG_STMPE811_MULTIPLE
  priv = &g_stmpe811;
#else
  for (priv = g_stmpe811list;
       priv && priv->config->irq != irq;
       priv = priv->flink);

  ASSERT(priv != NULL);
#endif

  /* Keep a convenience pointer to the board callbacks */

  config = priv->config;
  DEBUGASSERT(config != NULL);

  /* Mask further interrupts until the worker has run */

  config->enable(config, false);

  /* Schedule the worker unless interrupt processing is already in the
   * pipeline.  Interrupts are disabled while the work is pending, so no
   * additional protection of the work queue is needed.
   */

  if (work_available(&priv->work))
    {
      ret = work_queue(HPWORK, &priv->work, stmpe811_worker, priv, 0);
      if (ret != 0)
        {
          illdbg("Failed to queue work: %d\n", ret);
        }
    }

  /* Acknowledge the interrupt at the device and report success */

  config->clear(config);
  return OK;
}
Ejemplo n.º 11
0
/* TX-available notification.  The poll is serialized on the worker
 * thread; when the single work structure is busy with pending interrupt
 * actions, this availability event is simply dropped.
 */
static int bcmf_txavail(FAR struct net_driver_s *dev)
{
  FAR struct bcmf_dev_s *priv = (FAR struct bcmf_dev_s *)dev->d_private;

  if (work_available(&priv->bc_pollwork))
    {
      work_queue(BCMFWORK, &priv->bc_pollwork, bcmf_txavail_work, priv, 0);
    }

  return OK;
}
Ejemplo n.º 12
0
/*
 * Stop accelerometer data reporting: cancel any queued report work and
 * drop the transaction count.
 *
 * Returns -EINVAL for a bad/unbound device, OK otherwise.
 */
static int sensor_accel_op_stop_reporting(struct device *dev,
    uint8_t sensor_id)
{
    struct sensor_accel_info *info;

    gb_debug("%s:\n",__func__);

    if (!dev || !device_get_private(dev))
        return -EINVAL;

    info = device_get_private(dev);

    /* stop any pending report work */
    if (!work_available(&info->data_report_work))
        work_cancel(LPWORK, &info->data_report_work);

    atomic_dec(&txn);

    return OK;
}
Ejemplo n.º 13
0
/*
 * Close the accelerometer device: cancel any pending report work and
 * clear the OPEN flag if it was set.
 */
static void sensor_accel_dev_close(struct device *dev)
{
    struct sensor_accel_info *info;

    gb_debug("%s:\n",__func__);

    if (!dev || !device_get_private(dev))
        return;

    info = device_get_private(dev);

    /* drop any queued report events */
    if (!work_available(&info->data_report_work))
        work_cancel(LPWORK, &info->data_report_work);

    if (info->flags & SENSOR_ACCEL_FLAG_OPEN)
        info->flags &= ~SENSOR_ACCEL_FLAG_OPEN;
}
Ejemplo n.º 14
0
/* Poll watchdog expiry for the loopback driver.  Defers poll processing
 * to the worker thread when the work structure is free; otherwise skips
 * this cycle and re-arms the watchdog.
 */
static void lo_poll_expiry(int argc, wdparm_t arg, ...)
{
  FAR struct lo_driver_s *priv = (FAR struct lo_driver_s *)arg;

  if (!work_available(&priv->lo_work))
    {
      /* Busy with pending interrupt actions: miss one polling cycle */

      (void)wd_start(priv->lo_polldog, LO_WDDELAY, lo_poll_expiry, 1, arg);
    }
  else
    {
      /* Run the poll processing on the worker thread */

      work_queue(HPWORK, &priv->lo_work, lo_poll_work, priv, 0);
    }
}
Ejemplo n.º 15
0
/*
 * usbhost_callback - completion handler for the hub's interrupt IN
 * transfer.
 *
 * arg is the hub's usbhost_class_s instance; nbytes is the transfer
 * result (negative errno on failure).  On failure the status buffer is
 * cleared and the next poll is delayed by POLL_DELAY to avoid a
 * CPU-hogging retry loop.  The hub event worker is then (re)scheduled
 * on the low-priority work queue unless the hub has been disconnected.
 */
static void usbhost_callback(FAR void *arg, ssize_t nbytes)
{
  FAR struct usbhost_class_s *hubclass;
  FAR struct usbhost_hubpriv_s *priv;
  uint32_t delay = 0;

  DEBUGASSERT(arg != NULL);
  hubclass = (FAR struct usbhost_class_s *)arg;
  priv     = &((FAR struct usbhost_hubclass_s *)hubclass)->hubpriv;

  /* Check for a failure.  On higher end host controllers, the asynchronous
   * transfer will pend until data is available (OHCI and EHCI).  On lower
   * end host controllers (like STM32 and EFM32), the transfer will fail
   * immediately when the device NAKs the first attempted interrupt IN
   * transfer (with nbytes == -EAGAIN).  In that case (or in the case of
   * other errors), we must fall back to polling.
   */

  if (nbytes < 0)
    {
      /* This debug output is good to know, but really a nuisance for
       * those configurations where we have to fall back to polling.
       * FIX:  Don't output the message if the result is -EAGAIN.
       */

#if defined(CONFIG_DEBUG_USB) && !defined(CONFIG_DEBUG_VERBOSE)
      /* When only non-verbose USB debug is enabled, suppress the noisy
       * -EAGAIN (NAK) case; the brace block below always compiles.
       */
      if (nbytes != -EAGAIN)
#endif
        {
          ulldbg("ERROR: Transfer failed: %d\n", (int)nbytes);
        }

      /* Indicate that there is nothing to do.  So when the work is
       * performed, nothing will happen other than we will set to receive
       * the next event.
       */

      priv->buffer[0] = 0;

      /* We don't know the nature of the failure, but we need to do all that
       * we can do to avoid a CPU hog error loop.
       *
       * Use the low-priority work queue and delay polling for the next
       * event.  We want to use as little CPU bandwidth as possible in this
       * case.
       */

      delay = POLL_DELAY;
    }

  /* The work structure should always be available since hub communications
   * are serialized.  However, there is a remote chance that this may
   * collide with a hub disconnection event.
   */

  if (work_available(&priv->work) && !priv->disconnected)
    {
      (void)work_queue(LPWORK, &priv->work, (worker_t)usbhost_hub_event,
                       hubclass, delay);
    }
}