void RegressionTask(void *arg1, void *arg2, void *arg3) { u32_t nCalls = 0; ARG_UNUSED(arg1); ARG_UNUSED(arg2); ARG_UNUSED(arg3); k_sem_give(&ALT_SEM); /* Activate AlternateTask() */ nCalls = criticalLoop(nCalls); /* Wait for AlternateTask() to complete */ zassert_true(k_sem_take(®RESS_SEM, TEST_TIMEOUT) == 0, "Timed out waiting for REGRESS_SEM"); zassert_equal(criticalVar, nCalls + altTaskIterations, "Unexpected value for <criticalVar>"); k_sched_time_slice_set(10, 10); k_sem_give(&ALT_SEM); /* Re-activate AlternateTask() */ nCalls = criticalLoop(nCalls); /* Wait for AlternateTask() to finish */ zassert_true(k_sem_take(®RESS_SEM, TEST_TIMEOUT) == 0, "Timed out waiting for REGRESS_SEM"); zassert_equal(criticalVar, nCalls + altTaskIterations, "Unexpected value for <criticalVar>"); k_sem_give(&TEST_SEM); }
/*
 * Fill <buf> with <len> bytes of entropy, blocking as needed.
 *
 * Drains the thread-level RNG pool under sem_lock; when the pool runs
 * dry, sleeps on sem_sync until the RNG interrupt refills it, then
 * retries.  Always returns 0 once the full request is satisfied.
 */
static int entropy_nrf5_get_entropy(struct device *device, u8_t *buf, u16_t len)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == DEV_DATA(device));

	for (; len != 0; ) {
		u16_t chunk;

		k_sem_take(&entropy_nrf5_data.sem_lock, K_FOREVER);
		chunk = rng_pool_get((struct rng_pool *)(entropy_nrf5_data.thr),
				     buf, len);
		k_sem_give(&entropy_nrf5_data.sem_lock);

		if (chunk == 0) {
			/* Pool is empty: Sleep until next interrupt. */
			k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
			continue;
		}

		buf += chunk;
		len -= chunk;
	}

	return 0;
}
/*
 * Helper thread for the k_wakeup() tests.
 *
 * Waits on helper_thread_sem twice; after the first release it wakes the
 * test thread directly, and after the second it wakes it from interrupt
 * context via irq_offload().  arg1/arg2 are not referenced.
 */
static void helper_thread(int arg1, int arg2)
{
	k_sem_take(&helper_thread_sem, K_FOREVER);

	/* Wake the test fiber */
	k_wakeup(test_thread_id);
	k_sem_take(&helper_thread_sem, K_FOREVER);

	/* Wake the test fiber from an ISR */
	irq_offload(irq_offload_isr, (void *)test_thread_id);
}
/*
 * Execute a list of I2C messages on a QMSI controller.
 *
 * @param dev      Controller device instance.
 * @param msgs     Message array (must not be NULL).
 * @param num_msgs Number of messages; 0 is a successful no-op.
 * @param addr     Target address used for every message.
 *
 * @return 0 on success, -EIO if a transfer fails to start or the
 *         completion callback reports a non-zero status.
 */
static int i2c_qmsi_transfer(struct device *dev, struct i2c_msg *msgs,
			     u8_t num_msgs, u16_t addr)
{
	struct i2c_qmsi_driver_data *driver_data = GET_DRIVER_DATA(dev);
	qm_i2c_t instance = GET_CONTROLLER_INSTANCE(dev);
	int rc;

	__ASSERT_NO_MSG(msgs);

	if (!num_msgs) {
		return 0;
	}

	/* Keep power management from suspending the device mid-transfer */
	device_busy_set(dev);

	for (int i = 0; i < num_msgs; i++) {
		u8_t op = msgs[i].flags & I2C_MSG_RW_MASK;
		bool stop = (msgs[i].flags & I2C_MSG_STOP) == I2C_MSG_STOP;
		qm_i2c_transfer_t xfer = { 0 };

		if (op == I2C_MSG_WRITE) {
			xfer.tx = msgs[i].buf;
			xfer.tx_len = msgs[i].len;
		} else {
			xfer.rx = msgs[i].buf;
			xfer.rx_len = msgs[i].len;
		}

		xfer.callback = transfer_complete;
		xfer.callback_data = dev;
		xfer.stop = stop;

		/* Serialize controller setup against concurrent callers */
		k_sem_take(&driver_data->sem, K_FOREVER);
		rc = qm_i2c_master_irq_transfer(instance, &xfer, addr);
		k_sem_give(&driver_data->sem);

		if (rc != 0) {
			device_busy_clear(dev);
			return -EIO;
		}

		/* Block current thread until the I2C transfer completes. */
		k_sem_take(&driver_data->device_sync_sem, K_FOREVER);

		/* transfer_status is presumably set by transfer_complete()
		 * before device_sync_sem is given -- confirm in the driver.
		 */
		if (driver_data->transfer_status != 0) {
			device_busy_clear(dev);
			return -EIO;
		}
	}

	device_busy_clear(dev);
	return 0;
}
/*
 * Verify k_sem_reset(): resetting clears any pending count, so takes fail
 * (-EBUSY without waiting, -EAGAIN on timeout) until a new give arrives.
 */
void test_sema_reset(void)
{
	k_sem_init(&sema, SEM_INITIAL, SEM_LIMIT);
	k_sem_give(&sema);
	k_sem_reset(&sema);
	/* Count must be back at zero after the reset */
	zassert_false(k_sem_count_get(&sema), NULL);
	/**TESTPOINT: sem take return -EBUSY*/
	zassert_equal(k_sem_take(&sema, K_NO_WAIT), -EBUSY, NULL);
	/**TESTPOINT: sem take return -EAGAIN*/
	zassert_equal(k_sem_take(&sema, TIMEOUT), -EAGAIN, NULL);
	/* A fresh give makes the semaphore takeable again */
	k_sem_give(&sema);
	zassert_false(k_sem_take(&sema, K_FOREVER), NULL);
}
static int adc_sam_read(struct device *dev, struct adc_seq_table *seq_tbl) { const struct adc_sam_dev_cfg *dev_cfg = DEV_CFG(dev); struct adc_sam_dev_data *const dev_data = DEV_DATA(dev); Afec *const afec = dev_cfg->regs; u8_t channel; u32_t num_samples; k_sem_take(&dev_data->mutex_thread, K_FOREVER); dev_data->active_channels = 0; /* Enable chosen channels */ for (int i = 0; i < seq_tbl->num_entries; i++) { channel = seq_tbl->entries[i].channel_id; if (channel >= ADC_CHANNELS) { return -EINVAL; } /* Check and set number of requested samples */ num_samples = seq_tbl->entries[i].buffer_length / sizeof(u16_t); if (!num_samples) { return -EINVAL; } dev_data->samples[channel].length = num_samples; /* Set start of sample buffer */ dev_data->samples[channel].buffer = (u16_t *)seq_tbl->entries[i].buffer; /* Enable channel */ dev_data->active_channels |= BIT(channel); } /* Enable chosen channels and their interrupts */ afec->AFEC_CHER = dev_data->active_channels; afec->AFEC_IER = dev_data->active_channels; /* Start conversions */ dev_data->measured_channels = 0; dev_data->active_chan_last = dev_data->active_channels; afec->AFEC_CR = AFEC_CR_START; k_sem_take(&dev_data->sem_meas, K_FOREVER); k_sem_give(&dev_data->mutex_thread); return 0; }
/*
 * Lock (enable == true) or unlock the STM32 flash control register.
 *
 * Locking waits for any in-progress flash operation to finish first;
 * unlocking writes the two key values only when CR is currently locked.
 * The driver semaphore guards the register sequence throughout.
 *
 * @return 0 on success, or the error from flash_stm32_wait_flash_idle().
 */
static int flash_stm32_write_protection(struct device *dev, bool enable)
{
	struct flash_stm32_priv *p = dev->driver_data;
#if defined(CONFIG_SOC_SERIES_STM32F4X)
	struct stm32f4x_flash *regs = p->regs;
#elif defined(CONFIG_SOC_SERIES_STM32L4X)
	struct stm32l4x_flash *regs = p->regs;
#endif
	int rc = 0;

	k_sem_take(&p->sem, K_FOREVER);

	if (!enable) {
		/* Unlock only when the lock bit is actually set */
		if (regs->cr & FLASH_CR_LOCK) {
			regs->keyr = FLASH_KEY1;
			regs->keyr = FLASH_KEY2;
		}
	} else {
		rc = flash_stm32_wait_flash_idle(p);
		if (rc == 0) {
			regs->cr |= FLASH_CR_LOCK;
		}
	}

	k_sem_give(&p->sem);

	return rc;
}
/*
 * Send one segment on an LE L2CAP channel under credit-based flow control.
 *
 * @return Number of bytes queued, -EAGAIN when no TX credit is available,
 *         -ENOMEM when segment allocation fails, or -ECONNRESET when the
 *         channel lost its connection while waiting.
 */
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch, struct net_buf *buf,
			      uint16_t sdu_hdr_len)
{
	int len;

	/* Wait for credits */
	if (k_sem_take(&ch->tx.credits, K_NO_WAIT)) {
		BT_DBG("No credits to transmit packet");
		return -EAGAIN;
	}

	/* NOTE(review): on the -ENOMEM path below, the credit consumed
	 * above is not given back to ch->tx.credits -- confirm whether
	 * that is intentional or a credit leak.
	 */
	buf = l2cap_chan_create_seg(ch, buf, sdu_hdr_len);
	if (!buf) {
		return -ENOMEM;
	}

	/* Channel may have been disconnected while waiting for credits */
	if (!ch->chan.conn) {
		net_buf_unref(buf);
		return -ECONNRESET;
	}

	BT_DBG("ch %p cid 0x%04x len %u credits %u", ch, ch->tx.cid,
	       buf->len, k_sem_count_get(&ch->tx.credits));

	/* Capture the length before bt_l2cap_send() consumes the buffer */
	len = buf->len;

	bt_l2cap_send(ch->chan.conn, ch->tx.cid, buf);

	return len;
}
/*
 * Process and reply to a pending telnet NVT command, if one is recorded.
 *
 * The shared telnet_cmd buffer is guarded by cmd_lock; if the lock is
 * contended we simply return and handle the command on a later pass.
 */
static inline void telnet_reply_command(void)
{
	if (k_sem_take(&cmd_lock, K_NO_WAIT)) {
		return;
	}

	/* No IAC byte recorded means there is no pending command */
	if (!telnet_cmd.iac) {
		goto out;
	}

	switch (telnet_cmd.op) {
	case NVT_CMD_AO:
		/* OK, no output then */
		__printk_hook_install(telnet_console_out_nothing);
		telnet_rb_init();
		break;
	case NVT_CMD_AYT:
		telnet_reply_ay_command();
		break;
	case NVT_CMD_DO:
		telnet_reply_do_command();
		break;
	default:
		SYS_LOG_DBG("Operation %u not handled", telnet_cmd.op);
		break;
	}

	/* Mark the command as consumed */
	telnet_cmd.iac = NVT_NUL;
	telnet_cmd.op = NVT_NUL;
	telnet_cmd.opt = NVT_NUL;

out:
	k_sem_give(&cmd_lock);
}
/*
 * Pop the oldest received buffer from the RX queue.
 *
 * In the ERROR state the queue is drained without blocking; otherwise
 * the call waits (up to the configured timeout) for the RX path to
 * queue a filled block.
 *
 * @return 0 on success, -EIO on invalid state or an empty queue,
 *         or the k_sem_take() error (e.g. -EAGAIN on timeout).
 */
static int i2s_stm32_read(struct device *dev, void **mem_block, size_t *size)
{
	struct i2s_stm32_data *const dev_data = DEV_DATA(dev);
	int ret;

	if (dev_data->rx.state == I2S_STATE_NOT_READY) {
		LOG_DBG("invalid state");
		return -EIO;
	}

	if (dev_data->rx.state != I2S_STATE_ERROR) {
		ret = k_sem_take(&dev_data->rx.sem, dev_data->rx.cfg.timeout);
		if (ret < 0) {
			return ret;
		}
	}

	/* Get data from the beginning of RX queue */
	if (queue_get(&dev_data->rx.mem_block_queue, mem_block, size) < 0) {
		return -EIO;
	}

	return 0;
}
static void sx9500_thread_main(int arg1, int unused) { struct device *dev = INT_TO_POINTER(arg1); struct sx9500_data *data = dev->driver_data; uint8_t reg_val; ARG_UNUSED(unused); while (1) { k_sem_take(&data->sem, K_FOREVER); if (i2c_reg_read_byte(data->i2c_master, data->i2c_slave_addr, SX9500_REG_IRQ_SRC, ®_val) < 0) { SYS_LOG_DBG("sx9500: error %d reading IRQ source register", ret); continue; } if ((reg_val & SX9500_CONV_DONE_IRQ) && data->handler_drdy) { data->handler_drdy(dev, &data->trigger_drdy); } if ((reg_val & SX9500_NEAR_FAR_IRQ) && data->handler_near_far) { data->handler_near_far(dev, &data->trigger_near_far); } } }
/*
 * Arm the always-on periodic timer alarm.
 *
 * Registers <callback> to fire after <count> ticks.  Fails with -ENOTSUP
 * when the timer has not been started, and -EIO when programming the
 * controller fails (in which case the callback registration is undone).
 * Access is serialized with the reentrancy semaphore when enabled.
 */
static int aon_timer_qmsi_set_alarm(struct device *dev,
				    counter_callback_t callback,
				    u32_t count, void *user_data)
{
	qm_aonpt_config_t cfg;
	int err = 0;

	/* The alarm can only be armed once the timer is running */
	if (QM_AONC[QM_AONC_0]->aonpt_cfg == 0) {
		return -ENOTSUP;
	}

	user_cb = callback;

	cfg.callback = aonpt_int_callback;
	cfg.int_en = true;
	cfg.count = count;
	cfg.callback_data = user_data;

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_take(RP_GET(dev), K_FOREVER);
	}

	if (qm_aonpt_set_config(QM_AONC_0, &cfg)) {
		user_cb = NULL;
		err = -EIO;
	}

	if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) {
		k_sem_give(RP_GET(dev));
	}

	return err;
}
/*
 * Fetch all FXOS8700 channels in a single burst read into data->raw.
 *
 * Only SENSOR_CHAN_ALL is supported.  The driver semaphore guards the
 * raw sample buffer against concurrent channel_get() calls.
 *
 * @return 0 on success, -ENOTSUP for a partial-channel request,
 *         -EIO on I2C failure.
 */
static int fxos8700_sample_fetch(struct device *dev, enum sensor_channel chan)
{
	const struct fxos8700_config *config = dev->config->config_info;
	struct fxos8700_data *data = dev->driver_data;
	u8_t buffer[FXOS8700_MAX_NUM_BYTES];
	u8_t num_bytes;
	s16_t *raw;
	int ret = 0;
	int i;

	if (chan != SENSOR_CHAN_ALL) {
		LOG_ERR("Unsupported sensor channel");
		return -ENOTSUP;
	}

	k_sem_take(&data->sem, K_FOREVER);

	/* Read all the channels in one I2C transaction. The number of bytes to
	 * read and the starting register address depend on the mode
	 * configuration (accel-only, mag-only, or hybrid).
	 */
	num_bytes = config->num_channels * FXOS8700_BYTES_PER_CHANNEL_NORMAL;

	__ASSERT(num_bytes <= sizeof(buffer), "Too many bytes to read");

	if (i2c_burst_read(data->i2c, config->i2c_address, config->start_addr,
			   buffer, num_bytes)) {
		LOG_ERR("Could not fetch sample");
		ret = -EIO;
		goto exit;
	}

	/* Parse the buffer into raw channel data (16-bit integers). To save
	 * RAM, store the data in raw format and wait to convert to the
	 * normalized sensor_value type until later.
	 */
	__ASSERT(config->start_channel + config->num_channels
			<= ARRAY_SIZE(data->raw),
		 "Too many channels");

	raw = &data->raw[config->start_channel];

	/* Samples arrive big-endian: high byte first */
	for (i = 0; i < num_bytes; i += 2) {
		*raw++ = (buffer[i] << 8) | (buffer[i+1]);
	}

#ifdef CONFIG_FXOS8700_TEMP
	if (i2c_reg_read_byte(data->i2c, config->i2c_address,
			      FXOS8700_REG_TEMP, &data->temp)) {
		LOG_ERR("Could not fetch temperature");
		ret = -EIO;
		goto exit;
	}
#endif

exit:
	k_sem_give(&data->sem);

	return ret;
}
void helper_task(void) { k_sem_take(&HELPER_SEM, K_FOREVER); k_sem_give(®RESS_SEM); k_mem_pool_free(&helper_block); }
/*
 * Reconfigure the I2C peripheral (speed/mode) at runtime.
 *
 * Determines the peripheral clock rate (SYSCLK on F0/F3, the RCC-reported
 * rate otherwise), then reprograms the timing registers while holding
 * the bus mutex with the peripheral disabled.
 *
 * @return result of stm32_i2c_configure_timing().
 */
int i2c_stm32_runtime_configure(struct device *dev, u32_t config)
{
	const struct i2c_stm32_config *cfg = DEV_CFG(dev);
	struct i2c_stm32_data *data = DEV_DATA(dev);
	I2C_TypeDef *i2c = cfg->i2c;
	u32_t clock = 0U;
	int ret;

#if defined(CONFIG_SOC_SERIES_STM32F3X) || defined(CONFIG_SOC_SERIES_STM32F0X)
	LL_RCC_ClocksTypeDef rcc_clocks;

	/*
	 * STM32F0/3 I2C's independent clock source supports only
	 * HSI and SYSCLK, not APB1. We force clock variable to
	 * SYSCLK frequency.
	 */
	LL_RCC_GetSystemClocksFreq(&rcc_clocks);
	clock = rcc_clocks.SYSCLK_Frequency;
#else
	clock_control_get_rate(device_get_binding(STM32_CLOCK_CONTROL_NAME),
			       (clock_control_subsys_t *) &cfg->pclken,
			       &clock);
#endif /* CONFIG_SOC_SERIES_STM32F3X) || CONFIG_SOC_SERIES_STM32F0X */

	data->dev_config = config;

	/* Hold the bus mutex while the peripheral is reprogrammed */
	k_sem_take(&data->bus_mutex, K_FOREVER);

	LL_I2C_Disable(i2c);
	LL_I2C_SetMode(i2c, LL_I2C_MODE_I2C);
	ret = stm32_i2c_configure_timing(dev, clock);

	k_sem_give(&data->bus_mutex);

	return ret;
}
/*
 * Check whether an incoming packet carries a telnet IAC command.
 *
 * @return true when the packet is a command (and was recorded for the
 *         telnet thread, if command support is enabled); false when it
 *         is ordinary data.
 */
static inline bool telnet_handle_command(struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(cmd_access,
					      struct telnet_simple_command);
	struct telnet_simple_command *cmd;

	cmd = (struct telnet_simple_command *)net_pkt_get_data_new(pkt,
								   &cmd_access);
	/* A command always starts with the IAC (interpret-as-command) byte */
	if (!cmd || cmd->iac != NVT_CMD_IAC) {
		return false;
	}

#ifdef CONFIG_TELNET_CONSOLE_SUPPORT_COMMAND
	LOG_DBG("Got a command %u/%u/%u", cmd->iac, cmd->op, cmd->opt);

	/* Record the command without blocking; if the buffer is locked
	 * the command is silently dropped.
	 */
	if (!k_sem_take(&cmd_lock, K_NO_WAIT)) {
		telnet_command_cpy(&telnet_cmd, cmd);

		k_sem_give(&cmd_lock);
		k_sem_give(&send_lock);
	}
#endif /* CONFIG_TELNET_CONSOLE_SUPPORT_COMMAND */

	return true;
}
static int aon_timer_qmsi_start(struct device *dev) { qm_aonpt_config_t qmsi_cfg; int result = 0; user_cb = NULL; qmsi_cfg.callback = NULL; qmsi_cfg.int_en = false; /* AONPT is a countdown timer. So, set the initial value to * the maximum value. */ qmsi_cfg.count = 0xffffffff; qmsi_cfg.callback_data = NULL; if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) { k_sem_take(RP_GET(dev), K_FOREVER); } if (qm_aonpt_set_config(QM_AONC_0, &qmsi_cfg)) { result = -EIO; } if (IS_ENABLED(CONFIG_AON_API_REENTRANCY)) { k_sem_give(RP_GET(dev)); } return result; }
/*
 * Benchmark helper thread: signals readiness on sem_bench, then takes
 * sem_bench_1 (with a 1000 timeout) to absorb the peer's earlier give.
 * p1/p2/p3 are not referenced.
 */
void thread_sem1_give_test(void *p1, void *p2, void *p3)
{
	k_sem_give(&sem_bench);	/* sync the 2 threads*/

	k_sem_take(&sem_bench_1, 1000);	/* clear the previous sem_give*/
	/* test_time1 = OS_GET_TIME(); */
}
/*
 * MicroPython binding: getaddrinfo(host, port[, family]).
 *
 * Resolves <host> via the Zephyr DNS resolver, blocking on a semaphore
 * that dns_resolve_cb() gives per query.  Raises OSError only when no
 * address at all was collected.
 */
STATIC mp_obj_t mod_getaddrinfo(size_t n_args, const mp_obj_t *args) {
    mp_obj_t host_in = args[0], port_in = args[1];
    const char *host = mp_obj_str_get_str(host_in);
    mp_int_t family = 0;
    if (n_args > 2) {
        family = mp_obj_get_int(args[2]);
    }

    getaddrinfo_state_t state;
    // Just validate that it's int
    (void)mp_obj_get_int(port_in);
    state.port = port_in;
    state.result = mp_obj_new_list(0, NULL);
    k_sem_init(&state.sem, 0, UINT_MAX);

    // Up to two passes: A then AAAA.  When the caller pinned a family,
    // the loop exits after the first pass.
    for (int i = 2; i--;) {
        int type = (family != AF_INET6 ? DNS_QUERY_TYPE_A : DNS_QUERY_TYPE_AAAA);
        RAISE_ERRNO(dns_get_addr_info(host, type, NULL, dns_resolve_cb, &state, 3000));
        // Wait for dns_resolve_cb() to finish this query
        k_sem_take(&state.sem, K_FOREVER);
        if (family != 0) {
            break;
        }
        family = AF_INET6;
    }

    // Raise error only if there's nothing to return, otherwise
    // it may be IPv4 vs IPv6 differences.
    mp_int_t len = MP_OBJ_SMALL_INT_VALUE(mp_obj_len(state.result));
    if (state.status != 0 && len == 0) {
        mp_raise_OSError(state.status);
    }

    return state.result;
}
/*
 * Start a master-transmit transfer and block until it completes.
 *
 * The first byte is written directly to prime the data register; the
 * remaining bytes are pushed by the ISR, which gives device_sync_sem
 * when done.
 *
 * @return value of transfer->ack after completion (true when the target
 *         acknowledged the transfer).
 */
static bool i2c_imx_write(struct device *dev, u8_t *txBuffer, u8_t txSize)
{
	I2C_Type *base = DEV_BASE(dev);
	struct i2c_imx_data *data = DEV_DATA(dev);
	struct i2c_master_transfer *transfer = &data->transfer;

	transfer->isBusy = true;

	/* Clear I2C interrupt flag to avoid spurious interrupt */
	I2C_ClearStatusFlag(base, i2cStatusInterrupt);

	/* Set I2C work under Tx mode */
	I2C_SetDirMode(base, i2cDirectionTransmit);
	transfer->currentDir = i2cDirectionTransmit;

	transfer->txBuff = txBuffer;
	transfer->txSize = txSize;

	/* Prime the shifter with the first byte */
	I2C_WriteByte(base, *transfer->txBuff);
	transfer->txBuff++;
	transfer->txSize--;

	/* Enable I2C interrupt, subsequent data transfer will be handled
	 * in ISR.
	 */
	I2C_SetIntCmd(base, true);

	/* Wait for the transfer to complete */
	k_sem_take(&data->device_sync_sem, K_FOREVER);

	return transfer->ack;
}
/* Send encrypted data */
/*
 * mbedTLS BIO send callback: wrap <buf> in a net_pkt and send it over the
 * HTTP client's TCP context, then block on tx_sem until ssl_sent()
 * confirms transmission.
 *
 * @return number of bytes sent, MBEDTLS_ERR_SSL_ALLOC_FAILED on
 *         allocation/append failure, or a negative net_context error.
 */
static int ssl_tx(void *context, const unsigned char *buf, size_t size)
{
	struct http_client_ctx *ctx = context;
	struct net_pkt *send_pkt;
	int ret, len;

	send_pkt = net_pkt_get_tx(ctx->tcp.ctx, BUF_ALLOC_TIMEOUT);
	if (!send_pkt) {
		return MBEDTLS_ERR_SSL_ALLOC_FAILED;
	}

	ret = net_pkt_append_all(send_pkt, size, (u8_t *)buf,
				 BUF_ALLOC_TIMEOUT);
	if (!ret) {
		/* Cannot append data */
		net_pkt_unref(send_pkt);
		return MBEDTLS_ERR_SSL_ALLOC_FAILED;
	}

	/* Remember the payload size before the packet is handed off */
	len = size;

	ret = net_context_send(send_pkt, ssl_sent, K_NO_WAIT, NULL, ctx);
	if (ret < 0) {
		net_pkt_unref(send_pkt);
		return ret;
	}

	/* Block until ssl_sent() signals completion */
	k_sem_take(&ctx->https.mbedtls.ssl_ctx.tx_sem, K_FOREVER);

	return len;
}
/*
 * Verify thread-to-thread data passing via a k_stack: push an item for
 * the child, then pop the item the child pushes back.
 */
static void tstack_thread_thread(struct k_stack *pstack)
{
	k_sem_init(&end_sema, 0, 1);
	/**TESTPOINT: thread-thread data passing via stack*/
	k_tid_t tid = k_thread_spawn(threadstack, STACK_SIZE,
				     tThread_entry, pstack, NULL, NULL,
				     K_PRIO_PREEMPT(0), 0, 0);
	tstack_push(pstack);
	/* Wait for the child thread's first signal (presumably after it
	 * pops our item -- confirm in tThread_entry)
	 */
	k_sem_take(&end_sema, K_FOREVER);

	/* Second signal: the child has pushed its own item */
	k_sem_take(&end_sema, K_FOREVER);
	tstack_pop(pstack);

	/* clear the spawn thread to avoid side effect */
	k_thread_abort(tid);
}
/*
 * Resolve <server> via DNS and wait (bounded) for the result.
 *
 * dns_cb() is expected to fill ctx->tcp.remote and give dns_waiter.wait.
 *
 * @return 0 on success, a negative dns_get_addr_info() error,
 *         -ETIMEDOUT when the semaphore times out, or -EINVAL when no
 *         address family was resolved.
 */
static int resolve_name(struct http_client_ctx *ctx, const char *server,
			enum dns_query_type type)
{
	struct waiter dns_waiter;
	int ret;

	dns_waiter.ctx = ctx;

	k_sem_init(&dns_waiter.wait, 0, 1);

	ret = dns_get_addr_info(server, type, &ctx->dns_id, dns_cb,
				&dns_waiter, DNS_WAIT);
	if (ret < 0) {
		NET_ERR("Cannot resolve %s (%d)", server, ret);
		ctx->dns_id = 0;
		return ret;
	}

	/* Wait a little longer for the DNS to finish so that
	 * the DNS will timeout before the semaphore.
	 */
	if (k_sem_take(&dns_waiter.wait, DNS_WAIT_SEM)) {
		NET_ERR("Timeout while resolving %s", server);
		ctx->dns_id = 0;
		return -ETIMEDOUT;
	}

	ctx->dns_id = 0;

	/* AF_UNSPEC here means the callback got no usable address */
	if (ctx->tcp.remote.family == AF_UNSPEC) {
		return -EINVAL;
	}

	return 0;
}
/*
 * Spawn the per-context HTTPS handler thread and wait for its startup
 * handshake.
 *
 * @return 0 on success, -EALREADY when the thread already exists,
 *         -ECANCELED when the startup handshake times out (the thread
 *         is shut down in that case).
 */
static int start_https(struct http_client_ctx *ctx)
{
	struct k_sem startup_sync;

	/* Start the thread that handles HTTPS traffic. */
	if (ctx->https.tid) {
		return -EALREADY;
	}

	NET_DBG("Starting HTTPS thread for %p", ctx);

	k_sem_init(&startup_sync, 0, 1);

	ctx->https.tid = k_thread_create(&ctx->https.thread,
					 ctx->https.stack,
					 ctx->https.stack_size,
					 (k_thread_entry_t)https_handler,
					 ctx, &startup_sync, 0,
					 K_PRIO_COOP(7), 0, 0);

	/* Wait until we know that the HTTPS thread startup was ok */
	if (k_sem_take(&startup_sync, HTTPS_STARTUP_TIMEOUT) < 0) {
		https_shutdown(ctx);
		return -ECANCELED;
	}

	NET_DBG("HTTPS thread %p started for %p", ctx->https.tid, ctx);

	return 0;
}
/*
 * Perform one conversion per sequence-table entry on the Kinetis ADC16,
 * blocking on data->sync until the conversion-complete ISR signals and
 * copying each result into the entry's buffer.
 *
 * @return 0 on success, -EINVAL when an entry's buffer is too small.
 */
static int mcux_adc16_read(struct device *dev, struct adc_seq_table *seq_table)
{
	const struct mcux_adc16_config *config = dev->config->config_info;
	struct mcux_adc16_data *data = dev->driver_data;
	ADC_Type *base = config->base;
	struct adc_seq_entry *entry = seq_table->entries;
	adc16_channel_config_t channel_config;
	u32_t channel_group = 0;
	int i;

	channel_config.enableInterruptOnConversionCompleted = true;
#if defined(FSL_FEATURE_ADC16_HAS_DIFF_MODE) && FSL_FEATURE_ADC16_HAS_DIFF_MODE
	channel_config.enableDifferentialConversion = false;
#endif

	for (i = 0; i < seq_table->num_entries; i++) {
		/* The caller's buffer must hold at least one result */
		if (entry->buffer_length < sizeof(data->result)) {
			return -EINVAL;
		}

		channel_config.channelNumber = entry->channel_id;
		/* NOTE(review): writing the channel config presumably
		 * triggers a software conversion -- confirm against the
		 * ADC16 SDK documentation.
		 */
		ADC16_SetChannelConfig(base, channel_group, &channel_config);
		data->channel_group = channel_group;

		/* Wait for the conversion-complete ISR */
		k_sem_take(&data->sync, K_FOREVER);
		memcpy(entry->buffer, &data->result, sizeof(data->result));

		entry++;
	}

	return 0;
}
static int fxas21002_channel_get(struct device *dev, enum sensor_channel chan, struct sensor_value *val) { const struct fxas21002_config *config = dev->config->config_info; struct fxas21002_data *data = dev->driver_data; int start_channel; int num_channels; s16_t *raw; int ret; int i; k_sem_take(&data->sem, K_FOREVER); /* Start with an error return code by default, then clear it if we find * a supported sensor channel. */ ret = -ENOTSUP; /* Convert raw gyroscope data to the normalized sensor_value type. */ switch (chan) { case SENSOR_CHAN_GYRO_X: start_channel = FXAS21002_CHANNEL_GYRO_X; num_channels = 1; break; case SENSOR_CHAN_GYRO_Y: start_channel = FXAS21002_CHANNEL_GYRO_Y; num_channels = 1; break; case SENSOR_CHAN_GYRO_Z: start_channel = FXAS21002_CHANNEL_GYRO_Z; num_channels = 1; break; case SENSOR_CHAN_GYRO_XYZ: start_channel = FXAS21002_CHANNEL_GYRO_X; num_channels = 3; break; default: start_channel = 0; num_channels = 0; break; } raw = &data->raw[start_channel]; for (i = 0; i < num_channels; i++) { fxas21002_convert(val++, *raw++, config->range); } if (num_channels > 0) { ret = 0; } if (ret != 0) { SYS_LOG_ERR("Unsupported sensor channel"); } k_sem_give(&data->sem); return ret; }
/*
 * Verify thread-to-thread data passing via a k_pipe: put data for the
 * child, then get the data the child puts back.
 */
static void tpipe_thread_thread(struct k_pipe *ppipe)
{
	k_sem_init(&end_sema, 0, 1);
	/**TESTPOINT: thread-thread data passing via pipe*/
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      tThread_entry, ppipe, NULL, NULL,
				      K_PRIO_PREEMPT(0), 0, 0);
	tpipe_put(ppipe);
	/* Wait for the child thread's first signal (presumably after it
	 * reads our data -- confirm in tThread_entry)
	 */
	k_sem_take(&end_sema, K_FOREVER);

	/* Second signal: the child has written its own data */
	k_sem_take(&end_sema, K_FOREVER);
	tpipe_get(ppipe);

	/* clear the spawned thread avoid side effect */
	k_thread_abort(tid);
}
/*
 * Critical-section test entry point: set up the kernel objects, start
 * the worker threads, and wait for RegressionTask() to signal completion
 * on TEST_SEM (double timeout: it runs two full passes).
 */
void test_critical(void)
{
	init_objects();
	start_threads();
	zassert_true(k_sem_take(&TEST_SEM, TEST_TIMEOUT * 2) == 0,
		     "Timed out waiting for TEST_SEM");
}
/*
 * Benchmark peer thread: syncs with the partner via sem_bench (bounded
 * take), gives the semaphore back, bumps the shared counter and aborts
 * the partner thread.  p1/p2/p3 are not referenced.
 */
void thread_sem0_test(void *p1, void *p2, void *p3)
{
	k_sem_take(&sem_bench, 10);/* To sync threads */

	k_sem_give(&sem_bench);
	sem_count++;
	k_thread_abort(sem0_tid);
}
void defrag_task(void) { k_sem_take(&DEFRAG_SEM, K_FOREVER); /* Wait to be activated */ k_mem_pool_defrag(&POOL_ID); k_sem_give(®RESS_SEM); /* defrag_task is finished */ }