static inline bool telnet_handle_command(struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(cmd_access,
					      struct telnet_simple_command);
	struct telnet_simple_command *cmd;

	cmd = (struct telnet_simple_command *)net_pkt_get_data_new(pkt,
								   &cmd_access);
	if (!cmd || cmd->iac != NVT_CMD_IAC) {
		return false;
	}

#ifdef CONFIG_TELNET_CONSOLE_SUPPORT_COMMAND
	LOG_DBG("Got a command %u/%u/%u", cmd->iac, cmd->op, cmd->opt);

	if (!k_sem_take(&cmd_lock, K_NO_WAIT)) {
		telnet_command_cpy(&telnet_cmd, cmd);

		k_sem_give(&cmd_lock);
		k_sem_give(&send_lock);
	}
#endif /* CONFIG_TELNET_CONSOLE_SUPPORT_COMMAND */

	return true;
}
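/*
 * Minimal sketch of the consumer side of this handoff (loop name is
 * hypothetical; the real sender thread is not shown here). The RX path
 * above never blocks: it tries cmd_lock with K_NO_WAIT, copies the
 * command, then gives send_lock to wake a thread pending on it, which
 * can reply via telnet_reply_command() shown further below.
 */
static void telnet_sender_loop_sketch(void)
{
	while (1) {
		/* Pend until telnet_handle_command() posts a command */
		k_sem_take(&send_lock, K_FOREVER);

		/* Reply under cmd_lock, as telnet_reply_command() does */
		telnet_reply_command();
	}
}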
static int flash_stm32_write_protection(struct device *dev, bool enable)
{
	struct flash_stm32_priv *p = dev->driver_data;
#if defined(CONFIG_SOC_SERIES_STM32F4X)
	struct stm32f4x_flash *regs = p->regs;
#elif defined(CONFIG_SOC_SERIES_STM32L4X)
	struct stm32l4x_flash *regs = p->regs;
#endif
	int rc = 0;

	k_sem_take(&p->sem, K_FOREVER);

	if (enable) {
		rc = flash_stm32_wait_flash_idle(p);
		if (rc) {
			k_sem_give(&p->sem);
			return rc;
		}
		regs->cr |= FLASH_CR_LOCK;
	} else {
		if (regs->cr & FLASH_CR_LOCK) {
			regs->keyr = FLASH_KEY1;
			regs->keyr = FLASH_KEY2;
		}
	}

	k_sem_give(&p->sem);

	return rc;
}
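/*
 * Hypothetical caller sketch (do_flash_operation() and the wrapper name
 * are illustrative, not the driver's real API): a flash operation is
 * bracketed by unlocking (enable == false) and relocking
 * (enable == true), with p->sem serializing access to the control
 * register in both directions.
 */
static int flash_op_example(struct device *dev)
{
	int rc;

	rc = flash_stm32_write_protection(dev, false);	/* unlock */
	if (rc) {
		return rc;
	}

	rc = do_flash_operation(dev);			/* hypothetical */

	/* Relock regardless of the operation's outcome */
	(void)flash_stm32_write_protection(dev, true);

	return rc;
}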
static void tThread_entry(void *p1, void *p2, void *p3)
{
	tstack_pop((struct k_stack *)p1);
	k_sem_give(&end_sema);
	tstack_push((struct k_stack *)p1);
	k_sem_give(&end_sema);
}
void RegressionTask(void *arg1, void *arg2, void *arg3)
{
	u32_t nCalls = 0;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	k_sem_give(&ALT_SEM);	/* Activate AlternateTask() */

	nCalls = criticalLoop(nCalls);

	/* Wait for AlternateTask() to complete */
	zassert_true(k_sem_take(&REGRESS_SEM, TEST_TIMEOUT) == 0,
		     "Timed out waiting for REGRESS_SEM");

	zassert_equal(criticalVar, nCalls + altTaskIterations,
		      "Unexpected value for <criticalVar>");

	k_sched_time_slice_set(10, 10);

	k_sem_give(&ALT_SEM);	/* Re-activate AlternateTask() */

	nCalls = criticalLoop(nCalls);

	/* Wait for AlternateTask() to finish */
	zassert_true(k_sem_take(&REGRESS_SEM, TEST_TIMEOUT) == 0,
		     "Timed out waiting for REGRESS_SEM");

	zassert_equal(criticalVar, nCalls + altTaskIterations,
		      "Unexpected value for <criticalVar>");

	k_sem_give(&TEST_SEM);
}
static void tThread_entry(void *p1, void *p2, void *p3)
{
	tpipe_get((struct k_pipe *)p1);
	k_sem_give(&end_sema);
	tpipe_put((struct k_pipe *)p1);
	k_sem_give(&end_sema);
}
void test_sema_reset(void)
{
	k_sem_init(&sema, SEM_INITIAL, SEM_LIMIT);
	k_sem_give(&sema);
	k_sem_reset(&sema);
	zassert_false(k_sem_count_get(&sema), NULL);
	/**TESTPOINT: sem take return -EBUSY*/
	zassert_equal(k_sem_take(&sema, K_NO_WAIT), -EBUSY, NULL);
	/**TESTPOINT: sem take return -EAGAIN*/
	zassert_equal(k_sem_take(&sema, TIMEOUT), -EAGAIN, NULL);
	k_sem_give(&sema);
	zassert_false(k_sem_take(&sema, K_FOREVER), NULL);
}
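/*
 * A related semantic the test above leans on implicitly (names and
 * values illustrative, a sketch rather than part of the test suite):
 * the limit passed at init time caps the count, so gives beyond the
 * limit are dropped rather than accumulated.
 */
K_SEM_DEFINE(demo_sem, 0, 2);	/* initial count 0, limit 2 */

void demo_sem_limit(void)
{
	k_sem_give(&demo_sem);
	k_sem_give(&demo_sem);
	k_sem_give(&demo_sem);	/* capped: count stays at the limit */

	zassert_equal(k_sem_count_get(&demo_sem), 2, NULL);
}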
void helper_task(void)
{
	k_sem_take(&HELPER_SEM, K_FOREVER);

	k_sem_give(&REGRESS_SEM);
	k_mem_pool_free(&helper_block);
}
void thread_sem1_give_test(void *p1, void *p2, void *p3)
{
	k_sem_give(&sem_bench);		/* sync the 2 threads */

	k_sem_take(&sem_bench_1, 1000);	/* clear the previous sem_give */
	/* test_time1 = OS_GET_TIME(); */
}
static int aon_timer_qmsi_start(struct device *dev)
{
	qm_aonpt_config_t qmsi_cfg;
	int result = 0;

	user_cb = NULL;

	qmsi_cfg.callback = NULL;
	qmsi_cfg.int_en = false;
	/* AONPT is a countdown timer. So, set the initial value to
	 * the maximum value.
	 */
	qmsi_cfg.count = 0xffffffff;
	qmsi_cfg.callback_data = NULL;

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_take(RP_GET(dev), K_FOREVER);
	}

	if (qm_aonpt_set_config(QM_AONC_0, &qmsi_cfg)) {
		result = -EIO;
	}

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_give(RP_GET(dev));
	}

	return result;
}
int pool_block_get_wait_test(void)
{
	int rv;

	rv = k_mem_pool_alloc(&POOL_ID, &block_list[0], 3000, K_FOREVER);
	if (rv != 0) {
		TC_ERROR("k_mem_pool_alloc(3000) expected %d, got %d\n", 0, rv);
		return TC_FAIL;
	}

	k_sem_give(&ALTERNATE_SEM);	/* Wake alternate_task */
	evidence = 0;

	rv = k_mem_pool_alloc(&POOL_ID, &block_list[1], 128, K_FOREVER);
	if (rv != 0) {
		TC_ERROR("k_mem_pool_alloc(128) expected %d, got %d\n", 0, rv);
		return TC_FAIL;
	}

	switch (evidence) {
	case 0:
		TC_ERROR("k_mem_pool_alloc(128) did not block!\n");
		return TC_FAIL;
	case 1:
		break;
	case 2:
	default:
		TC_ERROR("Rescheduling did not occur "
			 "after k_mem_pool_free()\n");
		return TC_FAIL;
	}

	k_mem_pool_free(&block_list[1]);

	return TC_PASS;
}
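/*
 * Sketch of the counterpart the test expects (the real alternate_task
 * is not shown; this reconstruction is inferred from the evidence
 * checks above): it frees the large block so the blocked
 * k_mem_pool_alloc(128) can complete, using <evidence> to prove both
 * that the alloc blocked and that the free caused a reschedule.
 */
void alternate_task_sketch(void)
{
	k_sem_take(&ALTERNATE_SEM, K_FOREVER);	/* wait for wakeup */

	evidence = 1;			/* proves the 128-byte alloc blocked */
	k_mem_pool_free(&block_list[0]);	/* should unblock it at once */
	evidence = 2;			/* only reached if no reschedule */
}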
static inline void telnet_reply_command(void)
{
	if (k_sem_take(&cmd_lock, K_NO_WAIT)) {
		return;
	}

	if (!telnet_cmd.iac) {
		goto out;
	}

	switch (telnet_cmd.op) {
	case NVT_CMD_AO:
		/* OK, no output then */
		__printk_hook_install(telnet_console_out_nothing);
		telnet_rb_init();
		break;
	case NVT_CMD_AYT:
		telnet_reply_ay_command();
		break;
	case NVT_CMD_DO:
		telnet_reply_do_command();
		break;
	default:
		SYS_LOG_DBG("Operation %u not handled", telnet_cmd.op);
		break;
	}

	telnet_cmd.iac = NVT_NUL;
	telnet_cmd.op = NVT_NUL;
	telnet_cmd.opt = NVT_NUL;
out:
	k_sem_give(&cmd_lock);
}
static int aon_timer_qmsi_set_alarm(struct device *dev,
				    counter_callback_t callback,
				    u32_t count, void *user_data)
{
	qm_aonpt_config_t qmsi_cfg;
	int result = 0;

	/* Check if timer has been started */
	if (QM_AONC[QM_AONC_0]->aonpt_cfg == 0) {
		return -ENOTSUP;
	}

	user_cb = callback;

	qmsi_cfg.callback = aonpt_int_callback;
	qmsi_cfg.int_en = true;
	qmsi_cfg.count = count;
	qmsi_cfg.callback_data = user_data;

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_take(RP_GET(dev), K_FOREVER);
	}

	if (qm_aonpt_set_config(QM_AONC_0, &qmsi_cfg)) {
		user_cb = NULL;
		result = -EIO;
	}

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_give(RP_GET(dev));
	}

	return result;
}
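/*
 * The two AON timer functions above share a reentrancy pattern: when
 * CONFIG_AON_API_REENTRANCY is enabled, RP_GET(dev) resolves to a
 * per-device semaphore used as a mutex around the non-reentrant QMSI
 * call. A condensed sketch of just that pattern (helper name
 * illustrative):
 */
static int qmsi_call_guarded(struct device *dev, qm_aonpt_config_t *cfg)
{
	int result = 0;

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_take(RP_GET(dev), K_FOREVER);	/* enter critical */
	}

	if (qm_aonpt_set_config(QM_AONC_0, cfg)) {
		result = -EIO;
	}

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_give(RP_GET(dev));		/* leave critical */
	}

	return result;
}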
static int entropy_nrf5_get_entropy(struct device *device, u8_t *buf, u16_t len)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == DEV_DATA(device));

	while (len) {
		u16_t bytes;

		k_sem_take(&entropy_nrf5_data.sem_lock, K_FOREVER);
		bytes = rng_pool_get((struct rng_pool *)(entropy_nrf5_data.thr),
				     buf, len);
		k_sem_give(&entropy_nrf5_data.sem_lock);

		if (bytes == 0) {
			/* Pool is empty: Sleep until next interrupt. */
			k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
			continue;
		}

		len -= bytes;
		buf += bytes;
	}

	return 0;
}
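/*
 * Sketch of the interrupt side this loop pends on (illustrative; the
 * real RNG ISR is not shown): once fresh entropy lands in the pool,
 * the ISR gives sem_sync, waking any thread parked in the
 * "pool is empty" branch above.
 */
static void rng_isr_sketch(void)
{
	/* ... move fresh bytes from the RNG into the pool ... */

	/* Wake a waiting consumer; an extra give is harmless because
	 * the consumer re-checks the pool before sleeping again.
	 */
	k_sem_give(&entropy_nrf5_data.sem_sync);
}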
static void ipm_console_receive_callback(void *context, u32_t id,
					 volatile void *data)
{
	struct device *d;
	struct ipm_console_receiver_runtime_data *driver_data;
	int ret;

	ARG_UNUSED(data);

	d = context;
	driver_data = d->driver_data;

	/* Should always be at least one free buffer slot */
	ret = ring_buf_item_put(&driver_data->rb, 0, id, NULL, 0);
	__ASSERT(ret == 0, "Failed to insert data into ring buffer");
	k_sem_give(&driver_data->sem);

	/* If the buffer is now full, disable future interrupts for this
	 * channel until the thread has a chance to consume characters.
	 *
	 * This works without losing data if the sending side tries to send
	 * more characters because the sending side is making an ipm_send()
	 * call with the wait flag enabled. It blocks until the receiver side
	 * re-enables the channel and consumes the data.
	 */
	if (ring_buf_space_get(&driver_data->rb) == 0) {
		ipm_set_enabled(driver_data->ipm_device, 0);
		driver_data->channel_disabled = 1;
	}
}
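/*
 * Sketch of the consumer thread this callback pairs with (loop name
 * illustrative; the real thread is not shown): it pends on the
 * semaphore given above, drains the ring buffer, and re-enables the
 * IPM channel if the callback had to disable it on overflow.
 */
static void receiver_thread_sketch(struct ipm_console_receiver_runtime_data *d)
{
	while (1) {
		k_sem_take(&d->sem, K_FOREVER);		/* wait for a char */

		/* ... pull one item out of d->rb and print it ... */

		if (d->channel_disabled &&
		    ring_buf_space_get(&d->rb) > 0) {
			ipm_set_enabled(d->ipm_device, 1);
			d->channel_disabled = 0;
		}
	}
}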
static int fxos8700_sample_fetch(struct device *dev, enum sensor_channel chan)
{
	const struct fxos8700_config *config = dev->config->config_info;
	struct fxos8700_data *data = dev->driver_data;
	u8_t buffer[FXOS8700_MAX_NUM_BYTES];
	u8_t num_bytes;
	s16_t *raw;
	int ret = 0;
	int i;

	if (chan != SENSOR_CHAN_ALL) {
		LOG_ERR("Unsupported sensor channel");
		return -ENOTSUP;
	}

	k_sem_take(&data->sem, K_FOREVER);

	/* Read all the channels in one I2C transaction. The number of bytes to
	 * read and the starting register address depend on the mode
	 * configuration (accel-only, mag-only, or hybrid).
	 */
	num_bytes = config->num_channels * FXOS8700_BYTES_PER_CHANNEL_NORMAL;

	__ASSERT(num_bytes <= sizeof(buffer), "Too many bytes to read");

	if (i2c_burst_read(data->i2c, config->i2c_address, config->start_addr,
			   buffer, num_bytes)) {
		LOG_ERR("Could not fetch sample");
		ret = -EIO;
		goto exit;
	}

	/* Parse the buffer into raw channel data (16-bit integers). To save
	 * RAM, store the data in raw format and wait to convert to the
	 * normalized sensor_value type until later.
	 */
	__ASSERT(config->start_channel + config->num_channels
		 <= ARRAY_SIZE(data->raw), "Too many channels");

	raw = &data->raw[config->start_channel];

	for (i = 0; i < num_bytes; i += 2) {
		*raw++ = (buffer[i] << 8) | (buffer[i+1]);
	}

#ifdef CONFIG_FXOS8700_TEMP
	if (i2c_reg_read_byte(data->i2c, config->i2c_address,
			      FXOS8700_REG_TEMP, &data->temp)) {
		LOG_ERR("Could not fetch temperature");
		ret = -EIO;
		goto exit;
	}
#endif

exit:
	k_sem_give(&data->sem);

	return ret;
}
/* a thread busy waits, then reports back through a semaphore */
static void test_busy_wait(void *mseconds, void *arg2, void *arg3)
{
	u32_t usecs;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	usecs = (int)mseconds * 1000;

	TC_PRINT("Thread busy waiting for %d usecs\n", usecs);
	k_busy_wait(usecs);
	TC_PRINT("Thread busy waiting completed\n");

	/*
	 * Ideally the test should verify that the correct number of ticks
	 * have elapsed. However, when running under QEMU, the tick interrupt
	 * may be processed on a very irregular basis, meaning that far
	 * fewer than the expected number of ticks may occur for a given
	 * number of clock cycles vs. what would ordinarily be expected.
	 *
	 * Consequently, the best we can do for now to test busy waiting is
	 * to invoke the API and verify that it returns. (If it takes way
	 * too long, or never returns, the main test task may be able to
	 * time out and report an error.)
	 */
	k_sem_give(&reply_timeout);
}
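/*
 * Sketch of the main-task side the closing comment refers to (function
 * name and timeout value illustrative): pending on reply_timeout with
 * a bounded wait turns a hung k_busy_wait() into a clean -EAGAIN
 * instead of hanging the whole test.
 */
static int wait_for_busy_wait_thread(void)
{
	return k_sem_take(&reply_timeout, 5000 /* ms */);
}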
static int sender_iface(struct net_if *iface, struct net_pkt *pkt)
{
	if (!pkt->frags) {
		DBG("No data to send!\n");
		return -ENODATA;
	}

	if (test_started) {
		struct net_if_test *data = iface->dev->driver_data;

		DBG("Sending at iface %d %p\n",
		    net_if_get_by_iface(iface), iface);

		if (net_pkt_iface(pkt) != iface) {
			DBG("Invalid interface %p, expecting %p\n",
			    net_pkt_iface(pkt), iface);
			test_failed = true;
		}

		if (net_if_get_by_iface(iface) != data->idx) {
			DBG("Invalid interface %d index, expecting %d\n",
			    data->idx, net_if_get_by_iface(iface));
			test_failed = true;
		}
	}

	net_pkt_unref(pkt);

	k_sem_give(&wait_data);

	return 0;
}
/*
 * Reset TX queue when errors are detected
 */
static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
{
	struct net_pkt *pkt;
	struct ring_buf *tx_frames = &queue->tx_frames;

	queue->err_tx_flushed_count++;

	/* Stop transmission, clean transmit pipeline and control registers */
	gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;

	/* Free all pkt resources in the TX path */
	while (tx_frames->tail != tx_frames->head) {
		/* Release net buffer to the buffer pool */
		pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]);
		net_pkt_unref(pkt);
		SYS_LOG_DBG("Dropping pkt %p", pkt);
		MODULO_INC(tx_frames->tail, tx_frames->len);
	}

	/* Reinitialize TX descriptor list */
	k_sem_reset(&queue->tx_desc_sem);
	tx_descriptors_init(gmac, queue);
	for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
		k_sem_give(&queue->tx_desc_sem);
	}

	/* Restart transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TXEN;
}
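/*
 * tx_desc_sem is a counting semaphore tracking free TX descriptors:
 * the reset loop above re-gives it once per usable descriptor, and
 * tx_completed() below gives it back as descriptors are released.
 * A condensed sketch of the send-side pairing (helper name
 * illustrative):
 */
static int tx_desc_acquire_sketch(struct gmac_queue *queue)
{
	/* Block until a descriptor is free; completion and error
	 * handling replenish the count.
	 */
	return k_sem_take(&queue->tx_desc_sem, K_FOREVER);
}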
static void ssl_sent(struct net_context *context, int status,
		     void *token, void *user_data)
{
	struct http_client_ctx *http_ctx = user_data;

	k_sem_give(&http_ctx->https.mbedtls.ssl_ctx.tx_sem);
}
/*
 * Process successfully sent packets
 */
static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
{
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_pkt *pkt;

	__ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED,
		 "first buffer of a frame is not marked as own by GMAC");

	while (tx_desc_list->tail != tx_desc_list->head) {
		tx_desc = &tx_desc_list->buf[tx_desc_list->tail];
		MODULO_INC(tx_desc_list->tail, tx_desc_list->len);
		k_sem_give(&queue->tx_desc_sem);

		if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) {
			/* Release net buffer to the buffer pool */
			pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames));
			net_pkt_unref(pkt);
			SYS_LOG_DBG("Dropping pkt %p", pkt);

			break;
		}
	}
}
void nrf_drv_radio802154_transmitted(bool pending_bit)
{
	ARG_UNUSED(pending_bit);

	nrf5_data.tx_success = true;
	k_sem_give(&nrf5_data.tx_wait);
}
static void dns_cb(enum dns_resolve_status status,
		   struct dns_addrinfo *info,
		   void *user_data)
{
	struct waiter *waiter = user_data;
	struct http_client_ctx *ctx = waiter->ctx;

	if (!(status == DNS_EAI_INPROGRESS && info)) {
		return;
	}

	if (info->ai_family == AF_INET) {
#if defined(CONFIG_NET_IPV4)
		net_ipaddr_copy(&net_sin(&ctx->tcp.remote)->sin_addr,
				&net_sin(&info->ai_addr)->sin_addr);
#else
		goto out;
#endif
	} else if (info->ai_family == AF_INET6) {
#if defined(CONFIG_NET_IPV6)
		net_ipaddr_copy(&net_sin6(&ctx->tcp.remote)->sin6_addr,
				&net_sin6(&info->ai_addr)->sin6_addr);
#else
		goto out;
#endif
	} else {
		goto out;
	}

	ctx->tcp.remote.family = info->ai_family;

out:
	k_sem_give(&waiter->wait);
}
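/*
 * Sketch of the waiting side of this callback (the resolver call is
 * elided and the timeout is illustrative; only the semaphore handshake
 * is the point): the caller initializes waiter->wait to 0, kicks off
 * the DNS request with the waiter as user_data, then pends until
 * dns_cb() copies the address and gives the semaphore.
 */
static int resolve_and_wait_sketch(struct waiter *waiter)
{
	k_sem_init(&waiter->wait, 0, 1);

	/* ... start the DNS resolution, passing waiter as user_data ... */

	/* -EAGAIN here means the resolver never called us back in time */
	return k_sem_take(&waiter->wait, 5000 /* ms, illustrative */);
}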
static void time_slot_callback_work(u32_t ticks_at_expire, u32_t remainder,
				    u16_t lazy, void *context)
{
	struct flash_op_desc *op_desc;
	u8_t instance_index;
	u8_t ticker_id;
	int result;

	__ASSERT(ll_radio_state_is_idle(),
		 "Radio is on during flash operation.\n");

	op_desc = context;
	if (op_desc->handler(op_desc->context) == FLASH_OP_DONE) {
		ll_timeslice_ticker_id_get(&instance_index, &ticker_id);

		/* Stop the time slot ticker */
		result = ticker_stop(instance_index, 0, ticker_id, NULL, NULL);
		if (result != TICKER_STATUS_SUCCESS &&
		    result != TICKER_STATUS_BUSY) {
			__ASSERT(0, "Failed to stop ticker.\n");
		}

		((struct flash_op_desc *)context)->result = 0;

		/* notify thread that data is available */
		k_sem_give(&sem_sync);
	}
}
int i2c_stm32_runtime_configure(struct device *dev, u32_t config)
{
	const struct i2c_stm32_config *cfg = DEV_CFG(dev);
	struct i2c_stm32_data *data = DEV_DATA(dev);
	I2C_TypeDef *i2c = cfg->i2c;
	u32_t clock = 0U;
	int ret;

#if defined(CONFIG_SOC_SERIES_STM32F3X) || defined(CONFIG_SOC_SERIES_STM32F0X)
	LL_RCC_ClocksTypeDef rcc_clocks;

	/*
	 * STM32F0/3 I2C's independent clock source supports only
	 * HSI and SYSCLK, not APB1. We force clock variable to
	 * SYSCLK frequency.
	 */
	LL_RCC_GetSystemClocksFreq(&rcc_clocks);
	clock = rcc_clocks.SYSCLK_Frequency;
#else
	clock_control_get_rate(device_get_binding(STM32_CLOCK_CONTROL_NAME),
			       (clock_control_subsys_t *) &cfg->pclken,
			       &clock);
#endif /* CONFIG_SOC_SERIES_STM32F3X || CONFIG_SOC_SERIES_STM32F0X */

	data->dev_config = config;

	k_sem_take(&data->bus_mutex, K_FOREVER);
	LL_I2C_Disable(i2c);
	LL_I2C_SetMode(i2c, LL_I2C_MODE_I2C);
	ret = stm32_i2c_configure_timing(dev, clock);
	k_sem_give(&data->bus_mutex);

	return ret;
}
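/*
 * bus_mutex above is a semaphore used as a mutex. A sketch of how such
 * a field is typically set up at driver init (function name
 * illustrative): initial count 1 and limit 1, so the first
 * k_sem_take() succeeds and concurrent configure/transfer calls
 * serialize on the bus.
 */
static void i2c_bus_mutex_init_sketch(struct i2c_stm32_data *data)
{
	k_sem_init(&data->bus_mutex, 1, 1);
}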
static int fxas21002_channel_get(struct device *dev,
				 enum sensor_channel chan,
				 struct sensor_value *val)
{
	const struct fxas21002_config *config = dev->config->config_info;
	struct fxas21002_data *data = dev->driver_data;
	int start_channel;
	int num_channels;
	s16_t *raw;
	int ret;
	int i;

	k_sem_take(&data->sem, K_FOREVER);

	/* Start with an error return code by default, then clear it if we find
	 * a supported sensor channel.
	 */
	ret = -ENOTSUP;

	/* Convert raw gyroscope data to the normalized sensor_value type. */
	switch (chan) {
	case SENSOR_CHAN_GYRO_X:
		start_channel = FXAS21002_CHANNEL_GYRO_X;
		num_channels = 1;
		break;
	case SENSOR_CHAN_GYRO_Y:
		start_channel = FXAS21002_CHANNEL_GYRO_Y;
		num_channels = 1;
		break;
	case SENSOR_CHAN_GYRO_Z:
		start_channel = FXAS21002_CHANNEL_GYRO_Z;
		num_channels = 1;
		break;
	case SENSOR_CHAN_GYRO_XYZ:
		start_channel = FXAS21002_CHANNEL_GYRO_X;
		num_channels = 3;
		break;
	default:
		start_channel = 0;
		num_channels = 0;
		break;
	}

	raw = &data->raw[start_channel];
	for (i = 0; i < num_channels; i++) {
		fxas21002_convert(val++, *raw++, config->range);
	}

	if (num_channels > 0) {
		ret = 0;
	}

	if (ret != 0) {
		SYS_LOG_ERR("Unsupported sensor channel");
	}

	k_sem_give(&data->sem);

	return ret;
}
static void i2c_sam_twi_isr(void *arg)
{
	struct device *dev = (struct device *)arg;
	const struct i2c_sam_twi_dev_cfg *const dev_cfg = DEV_CFG(dev);
	struct i2c_sam_twi_dev_data *const dev_data = DEV_DATA(dev);
	Twi *const twi = dev_cfg->regs;
	struct twi_msg *msg = &dev_data->msg;
	u32_t isr_status;

	/* Retrieve interrupt status */
	isr_status = twi->TWI_SR & twi->TWI_IMR;

	/* Not Acknowledged */
	if (isr_status & TWI_SR_NACK) {
		msg->twi_sr = isr_status;
		goto tx_comp;
	}

	/* Byte received */
	if (isr_status & TWI_SR_RXRDY) {
		msg->buf[msg->idx++] = twi->TWI_RHR;

		if (msg->idx == msg->len - 1) {
			/* Send a STOP condition on the TWI */
			twi->TWI_CR = TWI_CR_STOP;
		}
	}

	/* Byte sent */
	if (isr_status & TWI_SR_TXRDY) {
		if (msg->idx == msg->len) {
			if (msg->flags & I2C_MSG_STOP) {
				/* Send a STOP condition on the TWI */
				twi->TWI_CR = TWI_CR_STOP;
				/* Disable Transmit Ready interrupt */
				twi->TWI_IDR = TWI_IDR_TXRDY;
			} else {
				/* Transmission completed */
				goto tx_comp;
			}
		} else {
			twi->TWI_THR = msg->buf[msg->idx++];
		}
	}

	/* Transmission completed */
	if (isr_status & TWI_SR_TXCOMP) {
		goto tx_comp;
	}

	return;

tx_comp:
	/* Disable all enabled interrupts */
	twi->TWI_IDR = twi->TWI_IMR;
	/* We are done */
	k_sem_give(&dev_data->sem);
}
void defrag_task(void)
{
	k_sem_take(&DEFRAG_SEM, K_FOREVER);	/* Wait to be activated */

	k_mem_pool_defrag(&POOL_ID);

	k_sem_give(&REGRESS_SEM);	/* defrag_task is finished */
}
void nrf_drv_radio802154_received(u8_t *p_data, s8_t power, s8_t lqi)
{
	nrf5_data.rx_psdu = p_data;
	nrf5_data.rssi = power;
	nrf5_data.lqi = lqi;

	k_sem_give(&nrf5_data.rx_wait);
}
void thread_sem0_test(void *p1, void *p2, void *p3)
{
	k_sem_take(&sem_bench, 10);	/* To sync threads */

	k_sem_give(&sem_bench);
	sem_count++;
	k_thread_abort(sem0_tid);
}
void main(void)
{
	int status = TC_FAIL;
	u32_t start_tick;
	u32_t end_tick;

	TC_START("Test kernel Sleep and Wakeup APIs\n");

	test_objects_init();

	test_thread_id = k_thread_create(&test_thread_data, test_thread_stack,
					 THREAD_STACK,
					 (k_thread_entry_t) test_thread,
					 0, 0, NULL, TEST_THREAD_PRIORITY,
					 0, 0);
	TC_PRINT("Test thread started: id = %p\n", test_thread_id);

	helper_thread_id = k_thread_create(&helper_thread_data,
					   helper_thread_stack, THREAD_STACK,
					   (k_thread_entry_t) helper_thread,
					   0, 0, NULL, HELPER_THREAD_PRIORITY,
					   0, 0);
	TC_PRINT("Helper thread started: id = %p\n", helper_thread_id);

	/* Activate test_thread */
	k_sem_give(&test_thread_sem);

	/* Wait for test_thread to activate us */
	k_sem_take(&task_sem, K_FOREVER);

	/* Wake the test fiber */
	k_wakeup(test_thread_id);

	if (test_failure) {
		goto done_tests;
	}

	TC_PRINT("Testing kernel k_sleep()\n");
	align_to_tick_boundary();

	start_tick = k_uptime_get_32();
	/* FIXME: one tick less to account for
	 * one extra tick for _TICK_ALIGN in k_sleep
	 */
	k_sleep(ONE_SECOND - TICKS_PER_MS);
	end_tick = k_uptime_get_32();

	if (!sleep_time_valid(start_tick, end_tick, ONE_SECOND)) {
		TC_ERROR("k_sleep() slept for %d ticks, not %d\n",
			 end_tick - start_tick, ONE_SECOND);
		goto done_tests;
	}

	status = TC_PASS;

done_tests:
	TC_END_REPORT(status);
}