Example #1
int
ble_hs_hci_evt_process(uint8_t *data)
{
    const struct ble_hs_hci_evt_dispatch_entry *entry;
    uint8_t event_code;
    uint8_t param_len;
    int event_len;
    int rc;

    /* Count events received */
    STATS_INC(ble_hs_stats, hci_event);

    /* Display to console */
    ble_hs_dbg_event_disp(data);

    /* Process the event */
    event_code = data[0];
    param_len = data[1];

    event_len = param_len + 2;

    entry = ble_hs_hci_evt_dispatch_find(event_code);
    if (entry == NULL) {
        STATS_INC(ble_hs_stats, hci_unknown_event);
        rc = BLE_HS_ENOTSUP;
    } else {
        rc = entry->cb(event_code, data, event_len);
    }

    ble_hci_trans_buf_free(data);

    return rc;
}
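
The ble_hs_stats counters bumped above belong to a Mynewt stats section that is declared and registered elsewhere in the host. As a rough, illustrative sketch of what such a setup looks like with Mynewt's stats API (the section and entry names below are invented for the example, not the real ble_hs_stats definition):

#include <assert.h>
#include "stats/stats.h"

/* Declare a stats section with two counters (hypothetical names). */
STATS_SECT_START(my_module_stats)
    STATS_SECT_ENTRY(events)
    STATS_SECT_ENTRY(unknown_events)
STATS_SECT_END

/* Section instance plus human-readable names for stats listings. */
STATS_SECT_DECL(my_module_stats) my_module_stats;

STATS_NAME_START(my_module_stats)
    STATS_NAME(my_module_stats, events)
    STATS_NAME(my_module_stats, unknown_events)
STATS_NAME_END(my_module_stats)

static void
my_module_stats_init(void)
{
    int rc;

    /* Initialize the section and register it by name. */
    rc = stats_init(STATS_HDR(my_module_stats),
                    STATS_SIZE_INIT_PARMS(my_module_stats, STATS_SIZE_32),
                    STATS_NAME_INIT_PARMS(my_module_stats));
    assert(rc == 0);
    rc = stats_register("my_module_stats", STATS_HDR(my_module_stats));
    assert(rc == 0);

    /* After registration, STATS_INC(my_module_stats, events) updates the counter. */
}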
Example #2
/**
 * Count Link Layer statistics for received PDUs
 *
 * Context: Link layer task
 *
 * @param hdr       Header of the received PDU (CRC status, channel)
 * @param len       Length of the received PDU, in bytes
 * @param pdu_type  Advertising PDU type; only used for advertising channel PDUs
 */
static void
ble_ll_count_rx_stats(struct ble_mbuf_hdr *hdr, uint16_t len, uint8_t pdu_type)
{
    uint8_t crcok;
    uint8_t chan;

    crcok = BLE_MBUF_HDR_CRC_OK(hdr);
    chan = hdr->rxinfo.channel;
    if (crcok) {
        if (chan < BLE_PHY_NUM_DATA_CHANS) {
            STATS_INC(ble_ll_stats, rx_data_pdu_crc_ok);
            STATS_INCN(ble_ll_stats, rx_data_bytes_crc_ok, len);
        } else {
            STATS_INC(ble_ll_stats, rx_adv_pdu_crc_ok);
            STATS_INCN(ble_ll_stats, rx_adv_bytes_crc_ok, len);
            ble_ll_count_rx_adv_pdus(pdu_type);
        }
    } else {
        if (chan < BLE_PHY_NUM_DATA_CHANS) {
            STATS_INC(ble_ll_stats, rx_data_pdu_crc_err);
            STATS_INCN(ble_ll_stats, rx_data_bytes_crc_err, len);
        } else {
            STATS_INC(ble_ll_stats, rx_adv_pdu_crc_err);
            STATS_INCN(ble_ll_stats, rx_adv_bytes_crc_err, len);
        }
    }
}
Example #3
/**
 * Creates a new mailbox.
 *
 * @param size is the number of entries in the mailbox.
 * @return the handle of the created mailbox.
 */
sys_mbox_t
sys_mbox_new(int size)
{
  unsigned long datasize;
  xQueueHandle mbox;
  u32_t i;

  /* Fail if the mailbox size is too large. */
  if(size > MBOX_MAX) {
#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
    return 0;
  }

  /* Find a mailbox that is not in use. */
  for(i = 0; i < SYS_MBOX_MAX; i++) {
    if(mboxes[i].queue == 0) {
      break;
    }
  }
  if(i == SYS_MBOX_MAX) {
#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
    return 0;
  }

  /* Compute the size of the queue memory required by this mailbox. */
  datasize = (sizeof(void *) * size) + portQUEUE_OVERHEAD_BYTES;

  /* Create a queue for this mailbox. */
  if(xQueueCreate(mboxes[i].buffer, datasize, size, sizeof(void *),
                  &mbox) != pdPASS) {
#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
    return 0;
  }

  /* Update the mailbox statistics. */
#if SYS_STATS
  STATS_INC(sys.mbox.used);
#if LWIP_STATS
  if(lwip_stats.sys.mbox.max < lwip_stats.sys.mbox.used) {
    lwip_stats.sys.mbox.max = lwip_stats.sys.mbox.used;
  }
#endif
#endif /* SYS_STATS */

  /* Save the queue handle. */
  mboxes[i].queue = mbox;

  /* Return this mailbox. */
  return &(mboxes[i]);
}
Example #4
/**
 * Called upon start of a received PDU
 *
 * Context: Interrupt
 *
 * @param rxbuf  Pointer to the received PDU data
 * @param chan   The channel the PDU was received on
 * @param rxhdr  Pointer to the BLE mbuf header for the received PDU
 *
 * @return int
 *   < 0: A frame we don't want to receive.
 *   = 0: Continue to receive frame. Don't go from rx to tx.
 *   > 0: Continue to receive frame and go from rx to tx when done.
 */
int
ble_ll_rx_start(uint8_t *rxbuf, uint8_t chan, struct ble_mbuf_hdr *rxhdr)
{
    int rc;
    uint8_t pdu_type;

    ble_ll_log(BLE_LL_LOG_ID_RX_START, chan, 0, rxhdr->beg_cputime);

    /* Check channel type */
    if (chan < BLE_PHY_NUM_DATA_CHANS) {
        /*
         * Data channel pdu. We should be in CONNECTION state with an
         * ongoing connection
         */
        if (g_ble_ll_data.ll_state == BLE_LL_STATE_CONNECTION) {
            rc = ble_ll_conn_rx_isr_start(rxhdr, ble_phy_access_addr_get());
        } else {
            STATS_INC(ble_ll_stats, bad_ll_state);
            rc = 0;
        }
        return rc;
    }

    /* Advertising channel PDU */
    pdu_type = rxbuf[0] & BLE_ADV_PDU_HDR_TYPE_MASK;

    switch (g_ble_ll_data.ll_state) {
    case BLE_LL_STATE_ADV:
        rc = ble_ll_adv_rx_isr_start(pdu_type);
        break;
    case BLE_LL_STATE_INITIATING:
        if ((pdu_type == BLE_ADV_PDU_TYPE_ADV_IND) ||
                (pdu_type == BLE_ADV_PDU_TYPE_ADV_DIRECT_IND)) {
            rc = 1;
        } else {
            rc = 0;
        }
        break;
    case BLE_LL_STATE_SCANNING:
        rc = ble_ll_scan_rx_isr_start(pdu_type, &rxhdr->rxinfo.flags);
        break;
    case BLE_LL_STATE_CONNECTION:
        /* Should not occur */
        assert(0);
        rc = 0;
        break;
    default:
        /* Should not be in this state! */
        rc = -1;
        STATS_INC(ble_ll_stats, bad_ll_state);
        break;
    }

    return rc;
}
Example #5
/**
 * Creates a new mailbox.
 *
 * @param size is the number of entries in the mailbox.
 * @return the handle of the created mailbox.
 */
err_t
sys_mbox_new(sys_mbox_t *mbox, int size)
{
  u32_t i;

  /* Fail if the mailbox size is too large. */
  if(size > MBOX_MAX) {
#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
    return ERR_MEM;
  }

  /* Find a mailbox that is not in use. */
  for(i = 0; i < SYS_MBOX_MAX; i++) {
    if(mboxes[i].queue == 0) {
      break;
    }
  }
  if(i == SYS_MBOX_MAX) {
#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
    return ERR_MEM;
  }

#if RTOS_FREERTOS
  /* Create a queue for this mailbox. */
  mbox->queue = xQueueCreate(size, sizeof(void *));
  if(mbox->queue == NULL) {
#endif /* RTOS_FREERTOS */

#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
    return ERR_MEM;
  }

  /* Update the mailbox statistics. */
#if SYS_STATS
  STATS_INC(sys.mbox.used);
#if LWIP_STATS
  if(lwip_stats.sys.mbox.max < lwip_stats.sys.mbox.used) {
    lwip_stats.sys.mbox.max = lwip_stats.sys.mbox.used;
  }
#endif
#endif /* SYS_STATS */

  /* Save the queue handle. */
  mboxes[i].queue = mbox->queue;

  /* Return this mailbox. */
  return ERR_OK;
}
Example #6
/**
 * Creates a new semaphore.
 *
 * @param count is non-zero if the semaphore should be acquired initially.
 * @return the handle of the created semaphore.
 */
err_t
sys_sem_new(sys_sem_t *sem, u8_t count)
{
  void *temp;
  u32_t i;

  /* Find a semaphore that is not in use. */
  for(i = 0; i < SYS_SEM_MAX; i++) {
    if(sems[i].queue == 0) {
      break;
    }
  }
  if(i == SYS_SEM_MAX) {
#if SYS_STATS
    STATS_INC(sys.sem.err);
#endif /* SYS_STATS */
    return ERR_MEM;
  }

  /* Create a single-entry queue to act as a semaphore. */
#if RTOS_FREERTOS
  sem->queue = xQueueCreate(1, sizeof(void *));
  if(sem->queue == NULL) {
#endif /* RTOS_FREERTOS */

#if SYS_STATS
    STATS_INC(sys.sem.err);
#endif /* SYS_STATS */
    return ERR_MEM;
  }

  /* Acquire the semaphore if necessary. */
  if(count == 0) {
    temp = 0;
    xQueueSend(sem->queue, &temp, 0);
  }

  /* Update the semaphore statistics. */
#if SYS_STATS
  STATS_INC(sys.sem.used);
#if LWIP_STATS
  if(lwip_stats.sys.sem.max < lwip_stats.sys.sem.used) {
    lwip_stats.sys.sem.max = lwip_stats.sys.sem.used;
  }
#endif
#endif /* SYS_STATS */

  /* Save the queue handle. */
  sems[i].queue = sem->queue;

  /* Return this semaphore. */
  return (ERR_OK);
}
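
For context, a minimal caller of the two err_t-based constructors above might look like the following sketch; the mailbox depth and the variable and function names are illustrative, not taken from the port:

static sys_mbox_t rx_mbox;
static sys_sem_t tx_done_sem;

static err_t
comm_init(void)
{
  err_t rc;

  /* Mailbox that can queue up to 16 message pointers. */
  rc = sys_mbox_new(&rx_mbox, 16);
  if(rc != ERR_OK) {
    return rc;
  }

  /* Semaphore with an initial count of zero; posted when a transmit completes. */
  rc = sys_sem_new(&tx_done_sem, 0);
  if(rc != ERR_OK) {
    sys_mbox_free(&rx_mbox);
    return rc;
  }

  return ERR_OK;
}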
Example #7
int
tsl2561_get_data(uint16_t *broadband, uint16_t *ir, struct tsl2561 *tsl2561)
{
    int rc;
    int delay_ticks;

    /* Wait integration time ms before getting a data sample */
    switch (tsl2561->cfg.integration_time) {
        case TSL2561_LIGHT_ITIME_13MS:
            delay_ticks = 14 * OS_TICKS_PER_SEC / 1000;
        break;
        case TSL2561_LIGHT_ITIME_101MS:
            delay_ticks = 102 * OS_TICKS_PER_SEC / 1000;
        break;
        case TSL2561_LIGHT_ITIME_402MS:
        default:
            delay_ticks = 403 * OS_TICKS_PER_SEC / 1000;
        break;
    }
    os_time_delay(delay_ticks);

    *broadband = *ir = 0;
    rc = tsl2561_read16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT | TSL2561_REGISTER_CHAN0_LOW,
                        broadband);
    if (rc) {
        goto err;
    }
    rc = tsl2561_read16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT | TSL2561_REGISTER_CHAN1_LOW,
                        ir);
    if (rc) {
        goto err;
    }

#if MYNEWT_VAL(TSL2561_STATS)
    switch (tsl2561->cfg.integration_time) {
        case TSL2561_LIGHT_ITIME_13MS:
            STATS_INC(g_tsl2561stats, samples_13ms);
        break;
        case TSL2561_LIGHT_ITIME_101MS:
            STATS_INC(g_tsl2561stats, samples_101ms);
        break;
        case TSL2561_LIGHT_ITIME_402MS:
            STATS_INC(g_tsl2561stats, samples_402ms);
        break;
        default:
        break;
    }
#endif

err:
    return rc;
}
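
A sketch of how a caller might use tsl2561_get_data(), assuming a tsl2561 instance that has already been initialized and configured by the driver's setup path (the g_tsl2561 variable and wrapper function below are hypothetical):

static struct tsl2561 g_tsl2561;    /* assumed already configured elsewhere */

static int
read_light_levels(uint16_t *broadband, uint16_t *ir)
{
    int rc;

    /* Blocks for the configured integration time, then reads both channels. */
    rc = tsl2561_get_data(broadband, ir, &g_tsl2561);
    if (rc != 0) {
        return rc;
    }

    return 0;
}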
Example #8
/**
 * Creates a new semaphore.
 *
 * @param count is non-zero if the semaphore should be acquired initially.
 * @return the handle of the created semaphore.
 */
sys_sem_t
sys_sem_new(u8_t count)
{
  xQueueHandle sem;
  void *temp;
  u32_t i;

  /* Find a semaphore that is not in use. */
  for(i = 0; i < SYS_SEM_MAX; i++) {
    if(sems[i].queue == 0) {
      break;
    }
  }
  if(i == SYS_SEM_MAX) {
#if SYS_STATS
    STATS_INC(sys.sem.err);
#endif /* SYS_STATS */
    return 0;
  }

  /* Create a single-entry queue to act as a semaphore. */
  if(xQueueCreate(sems[i].buffer, sizeof(sems[0].buffer), 1, sizeof(void *),
                  &sem) != pdPASS) {
#if SYS_STATS
    STATS_INC(sys.sem.err);
#endif /* SYS_STATS */
    return 0;
  }

  /* Acquire the semaphore if necessary. */
  if(count == 0) {
    temp = 0;
    xQueueSend(sem, &temp, 0);
  }

  /* Update the semaphore statistics. */
#if SYS_STATS
  STATS_INC(sys.sem.used);
#if LWIP_STATS
  if(lwip_stats.sys.sem.max < lwip_stats.sys.sem.used) {
    lwip_stats.sys.sem.max = lwip_stats.sys.sem.used;
  }
#endif
#endif /* SYS_STATS */

  /* Save the queue handle. */
  sems[i].queue = sem;

  /* Return this semaphore. */
  return &(sems[i]);
}
Example #9
/**
 * Destroys a mailbox.
 *
 * @param mbox is the mailbox to be destroyed.
 */
void
sys_mbox_free(sys_mbox_t *mbox)
{
  /* There should not be any messages waiting (if there are it is a bug).  If
     any are waiting, increment the mailbox error count. */
#if RTOS_FREERTOS
  if(uxQueueMessagesWaiting(mbox->queue) != 0) {
#endif /* RTOS_FREERTOS */

#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
  }

  /* Find the mailbox entry being freed and delete its queue. */
  u32_t i;
  for(i = 0; i < SYS_MBOX_MAX; i++) {
    if(mboxes[i].queue == mbox->queue) {
      mboxes[i].queue = SYS_MBOX_NULL;
      vSemaphoreDelete(mbox->queue);
      mbox->queue = 0;
      return;
    }
  }

  /* Clear the queue handle. */
  mbox->queue = 0;

  /* Update the mailbox statistics. */
#if SYS_STATS
   STATS_DEC(sys.mbox.used);
#endif /* SYS_STATS */
}
Example #10
/**
 * Reads a single disk object from flash.
 *
 * @param area_idx              The area to read the object from.
 * @param area_offset           The offset within the area to read from.
 * @param out_disk_object       On success, the restored object gets written
 *                                  here.
 *
 * @return                      0 on success; nonzero on failure.
 */
static int
nffs_restore_disk_object(int area_idx, uint32_t area_offset,
                         struct nffs_disk_object *out_disk_object)
{
    int rc;

    rc = nffs_flash_read(area_idx, area_offset,
                         &out_disk_object->ndo_un_obj,
                         sizeof(out_disk_object->ndo_un_obj));
    if (rc != 0) {
        return rc;
    }
    STATS_INC(nffs_stats, nffs_readcnt_object);

    if (nffs_hash_id_is_inode(out_disk_object->ndo_disk_inode.ndi_id)) {
        out_disk_object->ndo_type = NFFS_OBJECT_TYPE_INODE;

    } else if (nffs_hash_id_is_block(out_disk_object->ndo_disk_block.ndb_id)) {
        out_disk_object->ndo_type = NFFS_OBJECT_TYPE_BLOCK;

    } else if (out_disk_object->ndo_disk_block.ndb_id == NFFS_ID_NONE) {
        return FS_EEMPTY;

    } else {
        return FS_ECORRUPT;
    }

    out_disk_object->ndo_area_idx = area_idx;
    out_disk_object->ndo_offset = area_offset;

    return 0;
}
Example #11
/**
 * Writes a chunk of data to flash.
 *
 * @param area_idx              The index of the area to write to.
 * @param area_offset           The offset within the area to write to.
 * @param data                  The data to write to flash.
 * @param len                   The number of bytes to write.
 *
 * @return                      0 on success;
 *                              FS_EOFFSET on an attempt to write to an
 *                                  invalid address range, or on an attempt to
 *                                  perform a non-strictly-sequential write;
 *                              FS_EHW on flash hardware error.
 */
int
nffs_flash_write(uint8_t area_idx, uint32_t area_offset, const void *data,
                 uint32_t len)
{
    struct nffs_area *area;
    int rc;

    assert(area_idx < nffs_num_areas);
    area = nffs_areas + area_idx;

    if (area_offset + len > area->na_length) {
        return FS_EOFFSET;
    }

    if (area_offset < area->na_cur) {
        return FS_EOFFSET;
    }

    STATS_INC(nffs_stats, nffs_iocnt_write);
    rc = nffs_os_flash_write(area->na_flash_id, area->na_offset + area_offset,
                             data, len);
    if (rc != 0) {
        return FS_EHW;
    }

    area->na_cur = area_offset + len;

    return 0;
}
Example #12
int
ble_l2cap_sig_reject_tx(struct ble_hs_conn *conn, struct ble_l2cap_chan *chan,
                        uint8_t id, uint16_t reason,
                        void *data, int data_len)
{
    struct ble_l2cap_sig_reject cmd;
    struct os_mbuf *txom;
    void *payload_buf;
    int rc;

    rc = ble_l2cap_sig_init_cmd(BLE_L2CAP_SIG_OP_REJECT, id,
                                BLE_L2CAP_SIG_REJECT_MIN_SZ + data_len, &txom,
                                &payload_buf);
    if (rc != 0) {
        return rc;
    }

    cmd.reason = reason;
    ble_l2cap_sig_reject_write(payload_buf, txom->om_len, &cmd,
                               data, data_len);

    STATS_INC(ble_l2cap_stats, sig_rx);
    rc = ble_l2cap_tx(conn, chan, txom);
    if (rc != 0) {
        return rc;
    }

    return 0;
}
Example #13
static void
ble_phy_isr(void)
{
    uint32_t irq_en;

    /* Read irq register to determine which interrupts are enabled */
    irq_en = NRF_RADIO->INTENCLR;

    /* Check for disabled event. This only happens for transmits now */
    if ((irq_en & RADIO_INTENCLR_DISABLED_Msk) && NRF_RADIO->EVENTS_DISABLED) {
        ble_phy_tx_end_isr();
    }

    /* We get this if we have started to receive a frame */
    if ((irq_en & RADIO_INTENCLR_ADDRESS_Msk) && NRF_RADIO->EVENTS_ADDRESS) {
        ble_phy_rx_start_isr();
    }

    /* Receive packet end (we don't enable this for transmit) */
    if ((irq_en & RADIO_INTENCLR_END_Msk) && NRF_RADIO->EVENTS_END) {
        ble_phy_rx_end_isr();
    }

    /* Ensures IRQ is cleared */
    irq_en = NRF_RADIO->SHORTS;

    /* Count # of interrupts */
    STATS_INC(ble_phy_stats, phy_isrs);
}
Example #14
/**
 * Puts the phy into receive mode.
 *
 * @return int 0: success; BLE Phy error code otherwise
 */
int
ble_phy_rx(void)
{
    /* Check radio state */
    nrf_wait_disabled();
    if (NRF_RADIO->STATE != RADIO_STATE_STATE_Disabled) {
        ble_phy_disable();
        STATS_INC(ble_phy_stats, radio_state_errs);
        return BLE_PHY_ERR_RADIO_STATE;
    }

    /* Make sure all interrupts are disabled */
    NRF_RADIO->INTENCLR = NRF_RADIO_IRQ_MASK_ALL;

    /* Clear events prior to enabling receive */
    NRF_RADIO->EVENTS_END = 0;
    NRF_RADIO->EVENTS_DISABLED = 0;

    /* Setup for rx */
    ble_phy_rx_xcvr_setup();

    /* Start the receive task in the radio if not automatically going to rx */
    if ((NRF_PPI->CHEN & PPI_CHEN_CH21_Msk) == 0) {
        NRF_RADIO->TASKS_RXEN = 1;
    }

    ble_ll_log(BLE_LL_LOG_ID_PHY_RX, g_ble_phy_data.phy_encrypted, 0, 0);

    return 0;
}
Example #15
static void
BalloonPageFree(Balloon *b,      // IN/OUT
                int isLargePage) // IN
{
   BalloonChunkList *chunkList = &b->pages[isLargePage];
   BalloonChunk *chunk;
   PageHandle page;

   ASSERT(DblLnkLst_IsLinked(&chunkList->chunks));
   chunk = DblLnkLst_Container(chunkList->chunks.next, BalloonChunk, node);

   /* deallocate last page */
   page = chunk->entries[--chunk->nEntries];

   /* deallocate page */
   OS_ReservedPageFree(page, isLargePage);

   STATS_INC(b->stats.primFree[isLargePage]);

   /* update balloon size */
   b->nPages--;

   /* reclaim chunk, if empty */
   BalloonChunkDestroyEmpty(b, chunk, isLargePage);
}
Example #16
void
Balloon_QueryAndExecute(void)
{
   Balloon *b = &globalBalloon;
   uint32 target = 0; // Silence compiler warning.
   int status;

   /* update stats */
   STATS_INC(b->stats.timer);

   /* reset, if specified */
   if (b->resetFlag) {
      BalloonReset(b);
   }

   /* contact monitor via backdoor */
   status = Backdoor_MonitorGetTarget(b, &target);

   /* decrement slowPageAllocationCycles counter */
   if (b->slowPageAllocationCycles > 0) {
      b->slowPageAllocationCycles--;
   }

   if (status == BALLOON_SUCCESS) {
      /* update target, adjust size */
      b->nPagesTarget = target;
      BalloonAdjustSize(b, target);
   }
}
Example #17
static int
BalloonUnlock(Balloon *b,      // IN/OUT
              uint16 nPages,   // IN
              int isLargePage, // IN
              uint32 *target)  // OUT
{
   PPN pagePPN = PA_2_PPN(OS_ReservedPageGetPA(b->pageHandle));
   int status = Backdoor_MonitorUnlockPage(b, pagePPN, target);

   ASSERT(!isLargePage);

   if (status != BALLOON_SUCCESS) {
      BalloonChunk *chunk = BalloonGetChunkOrFallback(b, FALSE);
      BalloonPageStore(chunk, b->pageHandle);
      goto out;
   }

   OS_ReservedPageFree(b->pageHandle, FALSE);
   STATS_INC(b->stats.primFree[FALSE]);

   /* update balloon size */
   b->nPages--;

out:
   b->pageHandle = PAGE_HANDLE_INVALID;
   if (b->fallbackChunk != NULL) {
      BalloonChunk_Destroy(b->fallbackChunk);
      b->fallbackChunk = NULL;
   }
   return status;
}
Example #18
/** Delete the object context.
 * \attention This is the only valid context deletion from within this block.
 */
void CqMotionModeBlock::EndMotionModeBlock()
{
	if( m_pDeformingSurface )
	{
		QGetRenderContext()->StorePrimitive( m_pDeformingSurface );
		STATS_INC( GPR_created );
	}
}
Example #19
/**
 * CqCurve constructor.
 */
CqCurve::CqCurve() : CqSurface()
{
	m_widthParamIndex = -1;
	m_constantwidthParamIndex = -1;
	m_splitDecision = Split_Undecided;

	STATS_INC( GPR_crv );
}
Example #20
static bool
alloc_itercb_snapshot(malloc_info_t *info, void *iter_data)
{
    stale_snap_allocs_t *snaps = (stale_snap_allocs_t *) iter_data;
    stale_per_alloc_t *spa = (stale_per_alloc_t *) info->client_data;
    uint cstack_id;
    uint bytes_asked_for;
    ASSERT(snaps != NULL, "invalid param");
    ASSERT(spa != NULL, "invalid param");
    /* FIXME: ignore pre_us? option-controlled? */
    cstack_id = get_cstack_id(spa->cstack);
    bytes_asked_for = info->request_size;
    ASSERT(snaps->idx < snaps->num_entries, "stale array overflow");
    if (snaps->uses_large) {
        snaps->data.lg[snaps->idx].cstack_id = cstack_id;
        snaps->data.lg[snaps->idx].bytes_asked_for = bytes_asked_for;
        snaps->data.lg[snaps->idx].last_access = spa->last_access;
    } else {
        ASSERT(spa->last_access <= STALE_SMALL_MAX_STAMP, "stale stamp overflow");
        snaps->data.sm.main[snaps->idx].last_access = spa->last_access;
        if (cstack_id <= STALE_SMALL_MAX_ID &&
            bytes_asked_for <= STALE_SMALL_MAX_SZ) {
            snaps->data.sm.main[snaps->idx].uses_ext = false;
            snaps->data.sm.main[snaps->idx].u.val.cstack_id = cstack_id;
            snaps->data.sm.main[snaps->idx].u.val.bytes_asked_for = bytes_asked_for;
        } else {
            STATS_INC(stale_small_needs_ext);
            snaps->data.sm.main[snaps->idx].uses_ext = true;
            if (snaps->data.sm.ext_entries >= snaps->data.sm.ext_capacity) {
                stale_snap_alloc_ext_t *newext;
                uint old_cap = snaps->data.sm.ext_capacity;
                if (snaps->data.sm.ext_capacity == 0)
                    snaps->data.sm.ext_capacity = STALE_SMALL_EXT_INITIAL_CAPACITY;
                else
                    snaps->data.sm.ext_capacity *= 2;
                newext = (stale_snap_alloc_ext_t *)
                    global_alloc(snaps->data.sm.ext_capacity*sizeof(*newext),
                                 HEAPSTAT_STALENESS);
                if (snaps->data.sm.ext != NULL) {
                    memcpy(newext, snaps->data.sm.ext,
                           snaps->data.sm.ext_entries*sizeof(*newext));
                    global_free(snaps->data.sm.ext, old_cap*sizeof(*newext),
                                HEAPSTAT_STALENESS);
                }
                snaps->data.sm.ext = newext;
            }
            snaps->data.sm.ext[snaps->data.sm.ext_entries].cstack_id = cstack_id;
            snaps->data.sm.ext[snaps->data.sm.ext_entries].bytes_asked_for =
                bytes_asked_for;
            snaps->data.sm.main[snaps->idx].u.ext_idx = snaps->data.sm.ext_entries;
            snaps->data.sm.ext_entries++;
        }
    }
    LOG(3, "\tadding "PFX"-"PFX" stamp %"INT64_FORMAT"u to snapshot idx %d\n",
        info->base, info->base + info->request_size, spa->last_access, snaps->idx);
    snaps->idx++;
    return true;
}
Example #21
/*
 *----------------------------------------------------------------------
 *
 * BalloonUnlockBatched --
 *
 *      Unlock all the batched pages previously stored by
 *      BalloonAddPageBatched.
 *
 * Results:
 *      BALLOON_SUCCESS or an error code. On success, *target is filled
 *      with the balloon target.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
BalloonUnlockBatched(Balloon *b,       // IN/OUT
                     uint16 nEntries,  // IN
                     int isLargePages, // IN
                     uint32 *target)   // OUT
{
   uint32 i;
   int status = BALLOON_SUCCESS;
   uint32 nUnlockedEntries;
   PPN64 batchPagePPN;
   BalloonChunk *chunk = NULL;

   batchPagePPN = PA_2_PPN(OS_ReservedPageGetPA(b->pageHandle));
   status = Backdoor_MonitorUnlockPagesBatched(b, batchPagePPN, nEntries,
                                               isLargePages, target);

   if (status != BALLOON_SUCCESS) {
      for (i = 0; i < nEntries; i++) {
         PA64 pa = Balloon_BatchGetPA(b->batchPage, i);
         PageHandle handle = OS_ReservedPageGetHandle(pa);

         chunk = BalloonGetChunkOrFallback(b, isLargePages);
         BalloonPageStore(chunk, handle);
      }
      goto out;
   }

   nUnlockedEntries = 0;
   for (i = 0; i < nEntries; i++) {
      int status = Balloon_BatchGetStatus(b->batchPage, i);
      PA64 pa = Balloon_BatchGetPA(b->batchPage, i);
      PageHandle handle = OS_ReservedPageGetHandle(pa);

      if (status != BALLOON_SUCCESS) {
         chunk = BalloonGetChunkOrFallback(b, isLargePages);
         BalloonPageStore(chunk, handle);
         continue;
      }

      OS_ReservedPageFree(handle, isLargePages);
      STATS_INC(b->stats.primFree[isLargePages]);

      nUnlockedEntries++;
   }

   if (isLargePages) {
      b->nPages -= nUnlockedEntries * OS_LARGE_2_SMALL_PAGES;
   } else {
      b->nPages -= nUnlockedEntries;
   }

out:
   if (b->fallbackChunk != NULL) {
      BalloonChunk_Destroy(b->fallbackChunk);
      b->fallbackChunk = NULL;
   }
   return status;
}
Example #22
/**
 * Destroys a mailbox.
 *
 * @param mbox is the mailbox to be destroyed.
 */
void
sys_mbox_free(sys_mbox_t mbox)
{
  unsigned portBASE_TYPE count;

  /* There should not be any messages waiting (if there are it is a bug).  If
     any are waiting, increment the mailbox error count. */
#if RTOS_SAFERTOS
  if((xQueueMessagesWaiting(mbox->queue, &count) != pdPASS) || (count != 0)) {
#elif RTOS_FREERTOS
  if(uxQueueMessagesWaiting(mbox->queue) != 0) {
#endif /* RTOS_SAFERTOS */

#if SYS_STATS
    STATS_INC(sys.mbox.err);
#endif /* SYS_STATS */
  }

  /* Clear the queue handle. */
  mbox->queue = 0;

  /* Update the mailbox statistics. */
#if SYS_STATS
   STATS_DEC(sys.mbox.used);
#endif /* SYS_STATS */
}

/**
 * The routine for a thread.  This handles some housekeeping around the
 * application's thread routine.
 *
 * @param arg is the index into the thread structure for this thread
 */
static void
sys_arch_thread(void *arg)
{
  u32_t i;

  /* Get this thread's index. */
  i = (u32_t)arg;

  /* Call the application's thread routine. */
  threads[i].thread(threads[i].arg);

  /* Free the memory used by this thread's stack. */
  mem_free(threads[i].stackstart);

  /* Clear the stack from the thread structure. */
  threads[i].stackstart = NULL;
  threads[i].stackend = NULL;

  /* Delete this task. */
#if RTOS_SAFERTOS
  xTaskDelete(NULL);
#elif RTOS_FREERTOS
  vTaskDelete(NULL);
#endif
}
Example #23
/**
 * Counts the number of advertising PDUs received, by type. For advertising
 * PDUs that contain a destination address, we still count these packets even
 * if they are not for us.
 *
 * @param pdu_type
 */
static void
ble_ll_count_rx_adv_pdus(uint8_t pdu_type)
{
    /* Count received packet types  */
    switch (pdu_type) {
    case BLE_ADV_PDU_TYPE_ADV_IND:
        STATS_INC(ble_ll_stats, rx_adv_ind);
        break;
    case BLE_ADV_PDU_TYPE_ADV_DIRECT_IND:
        STATS_INC(ble_ll_stats, rx_adv_direct_ind);
        break;
    case BLE_ADV_PDU_TYPE_ADV_NONCONN_IND:
        STATS_INC(ble_ll_stats, rx_adv_nonconn_ind);
        break;
    case BLE_ADV_PDU_TYPE_SCAN_REQ:
        STATS_INC(ble_ll_stats, rx_scan_reqs);
        break;
    case BLE_ADV_PDU_TYPE_SCAN_RSP:
        STATS_INC(ble_ll_stats, rx_scan_rsps);
        break;
    case BLE_ADV_PDU_TYPE_CONNECT_REQ:
        STATS_INC(ble_ll_stats, rx_connect_reqs);
        break;
    case BLE_ADV_PDU_TYPE_ADV_SCAN_IND:
        STATS_INC(ble_ll_stats, rx_scan_ind);
        break;
    default:
        break;
    }
}
Example #24
/**
 * Read multiple bytes starting from specified register over SPI
 *
 * @param itf     The sensor interface
 * @param reg     The register address to start reading from
 * @param buffer  Pointer to where the register values should be written
 * @param len     Number of bytes to read
 *
 * @return 0 on success, non-zero on failure
 */
int
adxl345_spi_readlen(struct sensor_itf *itf, uint8_t reg, uint8_t *buffer,
                    uint8_t len)
{
    int i;
    uint16_t retval;
    int rc = 0;

    /* Select the device */
    hal_gpio_write(itf->si_cs_pin, 0);

    /* Send the address */
    retval = hal_spi_tx_val(itf->si_num, reg | ADXL345_SPI_READ_CMD_BIT
                            | ADXL345_SPI_MULTIBYTE_CMD_BIT);
    
    if (retval == 0xFFFF) {
        rc = SYS_EINVAL;
        ADXL345_LOG(ERROR, "SPI_%u register write failed addr:0x%02X\n",
                    itf->si_num, reg);
        STATS_INC(g_adxl345stats, read_errors);
        goto err;
    }

    for (i = 0; i < len; i++) {
        /* Read data */
        retval = hal_spi_tx_val(itf->si_num, 0);
        if (retval == 0xFFFF) {
            rc = SYS_EINVAL;
            ADXL345_LOG(ERROR, "SPI_%u read failed addr:0x%02X\n",
                        itf->si_num, reg);
            STATS_INC(g_adxl345stats, read_errors);
            goto err;
        }
        buffer[i] = retval;
    }

err:
    /* De-select the device */
    hal_gpio_write(itf->si_cs_pin, 1);

    return rc;
}
Example #25
/**
 * Allocate a pdu (chain) for reception.
 *
 * @param len  Length of the PDU to allocate, in bytes
 *
 * @return struct os_mbuf*  Pointer to the allocated mbuf chain, or NULL if
 *                          no buffers were available.
 */
struct os_mbuf *
ble_ll_rxpdu_alloc(uint16_t len)
{
    uint16_t mb_bytes;
    struct os_mbuf *m;
    struct os_mbuf *n;
    struct os_mbuf *p;
    struct os_mbuf_pkthdr *pkthdr;

    p = os_msys_get_pkthdr(len, sizeof(struct ble_mbuf_hdr));
    if (!p) {
        goto rxpdu_alloc_exit;
    }

    /* Set packet length */
    pkthdr = OS_MBUF_PKTHDR(p);
    pkthdr->omp_len = len;

    /*
     * NOTE: first mbuf in chain will have data pre-pended to it so we adjust
     * m_data by a word.
     */
    p->om_data += 4;
    mb_bytes = (p->om_omp->omp_databuf_len - p->om_pkthdr_len - 4);

    if (mb_bytes < len) {
        n = p;
        len -= mb_bytes;
        while (len) {
            m = os_msys_get(len, 0);
            if (!m) {
                os_mbuf_free_chain(p);
                p = NULL;
                goto rxpdu_alloc_exit;
            }
            /* Chain new mbuf to existing chain */
            SLIST_NEXT(n, om_next) = m;
            n = m;
            mb_bytes = m->om_omp->omp_databuf_len;
            if (mb_bytes >= len) {
                len = 0;
            } else {
                len -= mb_bytes;
            }
        }
    }


rxpdu_alloc_exit:
    if (!p) {
        STATS_INC(ble_ll_stats, no_bufs);
    }
    return p;
}
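
As a usage sketch, a hypothetical consumer could copy a received payload into the chain returned by ble_ll_rxpdu_alloc() with os_mbuf_copyinto(), which follows chained mbufs; the helper below is illustrative only:

static struct os_mbuf *
copy_rx_payload(const uint8_t *src, uint16_t pdu_len)
{
    struct os_mbuf *rxpdu;

    rxpdu = ble_ll_rxpdu_alloc(pdu_len);
    if (rxpdu == NULL) {
        /* ble_ll_rxpdu_alloc() already counted the no_bufs failure. */
        return NULL;
    }

    /* Copy the payload into the (possibly chained) mbuf at offset 0. */
    if (os_mbuf_copyinto(rxpdu, 0, src, pdu_len) != 0) {
        os_mbuf_free_chain(rxpdu);
        return NULL;
    }

    return rxpdu;
}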
Example #26
/**
 * Processes packets received from the PHY.
 *
 * Context: Link layer task
 */
static void
ble_ll_rx_pkt_in(void)
{
    os_sr_t sr;
    uint8_t pdu_type;
    uint8_t *rxbuf;
    struct os_mbuf_pkthdr *pkthdr;
    struct ble_mbuf_hdr *ble_hdr;
    struct os_mbuf *m;

    /* Drain all packets off the queue */
    while (STAILQ_FIRST(&g_ble_ll_data.ll_rx_pkt_q)) {
        /* Get mbuf pointer from packet header pointer */
        pkthdr = STAILQ_FIRST(&g_ble_ll_data.ll_rx_pkt_q);
        m = (struct os_mbuf *)((uint8_t *)pkthdr - sizeof(struct os_mbuf));

        /* Remove from queue */
        OS_ENTER_CRITICAL(sr);
        STAILQ_REMOVE_HEAD(&g_ble_ll_data.ll_rx_pkt_q, omp_next);
        OS_EXIT_CRITICAL(sr);

        /* Note: pdu type won't get used unless this is an advertising pdu */
        ble_hdr = BLE_MBUF_HDR_PTR(m);
        rxbuf = m->om_data;
        pdu_type = rxbuf[0] & BLE_ADV_PDU_HDR_TYPE_MASK;
        ble_ll_count_rx_stats(ble_hdr, pkthdr->omp_len, pdu_type);

        /* Process the data or advertising pdu */
        if (ble_hdr->rxinfo.channel < BLE_PHY_NUM_DATA_CHANS) {
            ble_ll_conn_rx_data_pdu(m, ble_hdr);
        } else {
            /* Process the PDU */
            switch (BLE_MBUF_HDR_RX_STATE(ble_hdr)) {
            case BLE_LL_STATE_ADV:
                ble_ll_adv_rx_pkt_in(pdu_type, rxbuf, ble_hdr);
                break;
            case BLE_LL_STATE_SCANNING:
                ble_ll_scan_rx_pkt_in(pdu_type, rxbuf, ble_hdr);
                break;
            case BLE_LL_STATE_INITIATING:
                ble_ll_init_rx_pkt_in(rxbuf, ble_hdr);
                break;
            default:
                /* Any other state should never occur */
                STATS_INC(ble_ll_stats, bad_ll_state);
                break;
            }

            /* Free the packet buffer */
            os_mbuf_free_chain(m);
        }
    }
}
Example #27
/**
 * Called when the LL receives a scan request or connection request
 *
 * Context: Called from interrupt context.
 *
 * @param rxbuf
 *
 * @return -1: request not for us or is a connect request.
 *          0: request (scan) is for us and we successfully went from rx to tx.
 *        > 0: PHY error attempting to go from rx to tx.
 */
static int
ble_ll_adv_rx_req(uint8_t pdu_type, struct os_mbuf *rxpdu)
{
    int rc;
    uint8_t chk_whitelist;
    uint8_t txadd;
    uint8_t *rxbuf;
    struct ble_mbuf_hdr *ble_hdr;
    struct ble_ll_adv_sm *advsm;

    rxbuf = rxpdu->om_data;
    if (ble_ll_adv_addr_cmp(rxbuf)) {
        return -1;
    }

    /* Determine whether the whitelist must be checked for this PDU type */
    advsm = &g_ble_ll_adv_sm;
    if (pdu_type == BLE_ADV_PDU_TYPE_SCAN_REQ) {
        chk_whitelist = advsm->adv_filter_policy & 1;
    } else {
        chk_whitelist = advsm->adv_filter_policy & 2;
    }

    /* Set device match bit if we are whitelisting */
    ble_hdr = BLE_MBUF_HDR_PTR(rxpdu);
    if (chk_whitelist) {
        /* Get the scanners address type */
        if (rxbuf[0] & BLE_ADV_PDU_HDR_TXADD_MASK) {
            txadd = BLE_ADDR_TYPE_RANDOM;
        } else {
            txadd = BLE_ADDR_TYPE_PUBLIC;
        }

        /* Check for whitelist match */
        if (!ble_ll_whitelist_match(rxbuf + BLE_LL_PDU_HDR_LEN, txadd)) {
            return -1;
        }
        ble_hdr->rxinfo.flags |= BLE_MBUF_HDR_F_DEVMATCH;
    }

    /* Setup to transmit the scan response if appropriate */
    rc = -1;
    if (pdu_type == BLE_ADV_PDU_TYPE_SCAN_REQ) {
        ble_phy_set_txend_cb(ble_ll_adv_tx_done, &g_ble_ll_adv_sm);
        rc = ble_phy_tx(advsm->scan_rsp_pdu, BLE_PHY_TRANSITION_NONE);
        if (!rc) {
            ble_hdr->rxinfo.flags |= BLE_MBUF_HDR_F_SCAN_RSP_TXD;
            STATS_INC(ble_ll_stats, scan_rsp_txg);
        }
    }

    return rc;
}
Example #28
bool PtexReader::readBlock(void* data, int size, bool reporterror)
{
    int result = _io->read(data, size, _fp);
    if (result == size) {
        _pos += size;
        STATS_INC(nblocksRead);
        STATS_ADD(nbytesRead, size);
        return true;
    }
    if (reporterror)
        setError("PtexReader error: read failed (EOF)");
    return false;
}
Example #29
/**
 * Reads the specified area from disk and loads its contents into the RAM
 * representation.
 *
 * @param area_idx              The index of the area to read.
 *
 * @return                      0 on success; nonzero on failure.
 */
static int
nffs_restore_area_contents(int area_idx)
{
    struct nffs_disk_object disk_object;
    struct nffs_area *area;
    int rc;

    area = nffs_areas + area_idx;

    area->na_cur = sizeof (struct nffs_disk_area);
    while (1) {
        rc = nffs_restore_disk_object(area_idx, area->na_cur,  &disk_object);
        switch (rc) {
        case 0:

            /* Valid object; restore it into the RAM representation. */
            rc = nffs_restore_object(&disk_object);

            /*
             * If the restore fails the CRC check, the object length field
             * can't be trusted so just start looking for the next valid
             * object in the flash area.
             * XXX Deal with file system corruption
             */
            if (rc == FS_ECORRUPT) {
                area->na_cur++;
            } else {
                STATS_INC(nffs_stats, nffs_object_count); /* restored objects */
                area->na_cur += nffs_restore_disk_object_size(&disk_object);
            }
            break;

        case FS_ECORRUPT:
            /*
             * Invalid object; keep scanning for a valid object ID and CRC
             * Can nffs_restore_disk_object return FS_ECORRUPT? XXX
             */
            area->na_cur++;
            break;

        case FS_EEMPTY:
        case FS_EOFFSET:
            /* End of disk encountered; area fully restored. */
            return 0;

        default:
            return rc;
        }
    }
}
Example #30
/**
 * Reads a single byte from the specified register over SPI.
 *
 * @param itf    The sensor interface
 * @param reg    The register address to read from
 * @param value  Pointer to where the register value should be written
 *
 * @return 0 on success, non-zero error code on failure.
 */
int
adxl345_spi_read8(struct sensor_itf *itf, uint8_t reg, uint8_t *value)
{
    uint16_t retval;
    int rc = 0;

    /* Select the device */
    hal_gpio_write(itf->si_cs_pin, 0);

    /* Send the address */
    retval = hal_spi_tx_val(itf->si_num, reg | ADXL345_SPI_READ_CMD_BIT);
    
    if (retval == 0xFFFF) {
        rc = SYS_EINVAL;
        ADXL345_LOG(ERROR, "SPI_%u register write failed addr:0x%02X\n",
                    itf->si_num, reg);
        STATS_INC(g_adxl345stats, read_errors);
        goto err;
    }

    /* Read data */
    retval = hal_spi_tx_val(itf->si_num, 0);
    if (retval == 0xFFFF) {
        rc = SYS_EINVAL;
        ADXL345_LOG(ERROR, "SPI_%u read failed addr:0x%02X\n",
                     itf->si_num, reg);
        STATS_INC(g_adxl345stats, read_errors);
        goto err;
    }
    *value = retval;
    
err:
    /* De-select the device */
    hal_gpio_write(itf->si_cs_pin, 1);

    return rc;
}