/* Launch the alternate and regression test threads, both at the same
 * preemptible priority so the scheduler can alternate between them.
 */
static void start_threads(void)
{
	k_thread_spawn(stack1, STACK_SIZE, AlternateTask,
		       NULL, NULL, NULL, K_PRIO_PREEMPT(12), 0, 0);
	k_thread_spawn(stack2, STACK_SIZE, RegressionTask,
		       NULL, NULL, NULL, K_PRIO_PREEMPT(12), 0, 0);
}
/*test cases*/ void test_msgq_purge_when_put(void) { struct k_msgq msgq; int ret; k_msgq_init(&msgq, tbuffer, MSG_SIZE, MSGQ_LEN); /*fill the queue to full*/ for (int i = 0; i < MSGQ_LEN; i++) { ret = k_msgq_put(&msgq, (void *)&data[i], K_NO_WAIT); zassert_equal(ret, 0, NULL); } /*create another thread waiting to put msg*/ k_tid_t tid = k_thread_spawn(tstack, STACK_SIZE, tThread_entry, &msgq, NULL, NULL, K_PRIO_PREEMPT(0), 0, 0); k_sleep(TIMEOUT >> 1); /**TESTPOINT: msgq purge while another thread waiting to put msg*/ k_msgq_purge(&msgq); k_sleep(TIMEOUT >> 1); k_thread_abort(tid); /*verify msg put after purge*/ for (int i = 0; i < MSGQ_LEN; i++) { ret = k_msgq_put(&msgq, (void *)&data[i], K_NO_WAIT); zassert_equal(ret, 0, NULL); } }
static void threads_suspend_resume(int prio) { int old_prio = k_thread_priority_get(k_current_get()); /* set current thread */ last_prio = prio; k_thread_priority_set(k_current_get(), last_prio); /* spawn thread with lower priority */ int spawn_prio = last_prio + 1; k_tid_t tid = k_thread_spawn(tstack, STACK_SIZE, thread_entry, NULL, NULL, NULL, spawn_prio, 0, 0); /* checkpoint: suspend current thread */ k_thread_suspend(tid); k_sleep(100); /* checkpoint: spawned thread shouldn't be executed after suspend */ assert_false(last_prio == spawn_prio, NULL); k_thread_resume(tid); k_sleep(100); /* checkpoint: spawned thread should be executed after resume */ assert_true(last_prio == spawn_prio, NULL); k_thread_abort(tid); /* restore environment */ k_thread_priority_set(k_current_get(), old_prio); }
/* Initialize the transmit FIFO and start the cooperative TX thread
 * that drains it.
 */
static void init_tx_queue(void)
{
	/* Transmit queue init */
	k_fifo_init(&tx_queue);

	k_thread_spawn(tx_stack, sizeof(tx_stack),
		       (k_thread_entry_t)tx_thread,
		       NULL, NULL, NULL, K_PRIO_COOP(8), 0, K_NO_WAIT);
}
/* Initialize the receive FIFO and start the network RX thread.  The
 * thread is marked K_ESSENTIAL: if it aborts, the kernel treats it as
 * a fatal system error.
 */
static void init_rx_queue(void)
{
	k_fifo_init(&rx_queue);

	rx_tid = k_thread_spawn(rx_stack, sizeof(rx_stack),
				(k_thread_entry_t)net_rx_thread,
				NULL, NULL, NULL, K_PRIO_COOP(8),
				K_ESSENTIAL, K_NO_WAIT);
}
/* Spawn a receiver thread, send it an alert, and give it TIMEOUT to
 * handle the notification before tearing it down.
 */
static void thread_alert(void)
{
	handler_executed = 0;

	/**TESTPOINT: thread-thread sync via alert*/
	k_tid_t tid = k_thread_spawn(tstack, STACK_SIZE, tThread_entry,
				     NULL, NULL, NULL,
				     K_PRIO_PREEMPT(0), 0, 0);
	alert_send();
	k_sleep(TIMEOUT);
	k_thread_abort(tid);
}
/* Application entry point: bring the network up, then run the DTLS
 * client in its own cooperative thread.
 */
void main(void)
{
	if (init_app() != 0) {
		printf("Cannot initialize network\n");
		return;
	}

	k_thread_spawn(stack, STACK_SIZE, (k_thread_entry_t) dtls_client,
		       NULL, NULL, NULL, K_PRIO_COOP(7), 0, 0);
}
/*test cases*/ void test_sched_is_preempt_thread(void) { k_sem_init(&end_sema, 0, 1); /*create preempt thread*/ k_tid_t tid = k_thread_spawn(tstack, STACK_SIZE, tpreempt_ctx, NULL, NULL, NULL, K_PRIO_PREEMPT(1), 0, 0); k_sem_take(&end_sema, K_FOREVER); k_thread_abort(tid); /*create coop thread*/ tid = k_thread_spawn(tstack, STACK_SIZE, tcoop_ctx, NULL, NULL, NULL, K_PRIO_COOP(1), 0, 0); k_sem_take(&end_sema, K_FOREVER); k_thread_abort(tid); /*invoke isr*/ irq_offload(tIsr, NULL); }
static void h5_init(void) { BT_DBG(""); h5.link_state = UNINIT; h5.rx_state = START; h5.tx_win = 4; /* TX thread */ k_fifo_init(&h5.tx_queue); k_thread_spawn(tx_stack, sizeof(tx_stack), (k_thread_entry_t)tx_thread, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); k_fifo_init(&h5.rx_queue); k_thread_spawn(rx_stack, sizeof(rx_stack), (k_thread_entry_t)rx_thread, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); /* Unack queue */ k_fifo_init(&h5.unack_queue); /* Init delayed work */ k_delayed_work_init(&ack_work, ack_timeout); k_delayed_work_init(&retx_work, retx_timeout); }
static void tstack_thread_thread(struct k_stack *pstack) { k_sem_init(&end_sema, 0, 1); /**TESTPOINT: thread-thread data passing via stack*/ k_tid_t tid = k_thread_spawn(threadstack, STACK_SIZE, tThread_entry, pstack, NULL, NULL, K_PRIO_PREEMPT(0), 0, 0); tstack_push(pstack); k_sem_take(&end_sema, K_FOREVER); k_sem_take(&end_sema, K_FOREVER); tstack_pop(pstack); /* clear the spawn thread to avoid side effect */ k_thread_abort(tid); }
int hts221_init_interrupt(struct device *dev) { struct hts221_data *drv_data = dev->driver_data; /* setup data ready gpio interrupt */ drv_data->gpio = device_get_binding(CONFIG_HTS221_GPIO_DEV_NAME); if (drv_data->gpio == NULL) { SYS_LOG_ERR("Cannot get pointer to %s device.", CONFIG_HTS221_GPIO_DEV_NAME); return -EINVAL; } gpio_pin_configure(drv_data->gpio, CONFIG_HTS221_GPIO_PIN_NUM, GPIO_DIR_IN | GPIO_INT | GPIO_INT_EDGE | GPIO_INT_ACTIVE_HIGH | GPIO_INT_DEBOUNCE); gpio_init_callback(&drv_data->gpio_cb, hts221_gpio_callback, BIT(CONFIG_HTS221_GPIO_PIN_NUM)); if (gpio_add_callback(drv_data->gpio, &drv_data->gpio_cb) < 0) { SYS_LOG_ERR("Could not set gpio callback."); return -EIO; } /* enable data-ready interrupt */ if (i2c_reg_write_byte(drv_data->i2c, HTS221_I2C_ADDR, HTS221_REG_CTRL3, HTS221_DRDY_EN) < 0) { SYS_LOG_ERR("Could not enable data-ready interrupt."); return -EIO; } #if defined(CONFIG_HTS221_TRIGGER_OWN_THREAD) k_sem_init(&drv_data->gpio_sem, 0, UINT_MAX); k_thread_spawn(drv_data->thread_stack, CONFIG_HTS221_THREAD_STACK_SIZE, (k_thread_entry_t)hts221_thread, POINTER_TO_INT(dev), 0, NULL, K_PRIO_COOP(CONFIG_HTS221_THREAD_PRIORITY), 0, 0); #elif defined(CONFIG_HTS221_TRIGGER_GLOBAL_THREAD) drv_data->work.handler = hts221_work_cb; drv_data->dev = dev; #endif gpio_pin_enable_callback(drv_data->gpio, CONFIG_HTS221_GPIO_PIN_NUM); return 0; }
static int bt_spi_open(void) { /* Configure RST pin and hold BLE in Reset */ gpio_pin_configure(rst_dev, GPIO_RESET_PIN, GPIO_DIR_OUT | GPIO_PUD_PULL_UP); gpio_pin_write(rst_dev, GPIO_RESET_PIN, 0); spi_configure(spi_dev, &spi_conf); #if defined(CONFIG_BLUETOOTH_SPI_BLUENRG) /* Configure the CS (Chip Select) pin */ gpio_pin_configure(cs_dev, GPIO_CS_PIN, GPIO_DIR_OUT | GPIO_PUD_PULL_UP); gpio_pin_write(cs_dev, GPIO_CS_PIN, 1); #endif /* CONFIG_BLUETOOTH_SPI_BLUENRG */ /* Configure IRQ pin and the IRQ call-back/handler */ gpio_pin_configure(irq_dev, GPIO_IRQ_PIN, GPIO_DIR_IN | GPIO_INT | GPIO_INT_EDGE | GPIO_INT_ACTIVE_HIGH); gpio_init_callback(&gpio_cb, bt_spi_isr, BIT(GPIO_IRQ_PIN)); if (gpio_add_callback(irq_dev, &gpio_cb)) { return -EINVAL; } if (gpio_pin_enable_callback(irq_dev, GPIO_IRQ_PIN)) { return -EINVAL; } /* Start RX thread */ k_thread_spawn(rx_stack, sizeof(rx_stack), (k_thread_entry_t)bt_spi_rx_thread, NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT); /* Take BLE out of reset */ gpio_pin_write(rst_dev, GPIO_RESET_PIN, 1); /* Device will let us know when it's ready */ k_sem_take(&sem_initialised, K_FOREVER); return 0; }
static int telnet_console_init(struct device *arg) { #ifdef CONFIG_NET_IPV4 struct sockaddr_in any_addr4 = { .sin_family = AF_INET, .sin_port = htons(TELNET_PORT), .sin_addr = INADDR_ANY_INIT }; static struct net_context *ctx4; #endif #ifdef CONFIG_NET_IPV6 struct sockaddr_in6 any_addr6 = { .sin6_family = AF_INET6, .sin6_port = htons(TELNET_PORT), .sin6_addr = IN6ADDR_ANY_INIT }; static struct net_context *ctx6; #endif #ifdef CONFIG_NET_IPV4 telnet_setup_server(&ctx4, AF_INET, (struct sockaddr *)&any_addr4, sizeof(any_addr4)); #endif #ifdef CONFIG_NET_IPV6 telnet_setup_server(&ctx6, AF_INET6, (struct sockaddr *)&any_addr6, sizeof(any_addr6)); #endif k_thread_spawn(&telnet_stack[0], TELNET_STACK_SIZE, (k_thread_entry_t)telnet_run, NULL, NULL, NULL, K_PRIO_COOP(TELNET_PRIORITY), 0, K_MSEC(10)); SYS_LOG_INF("Telnet console initialized"); return 0; }
int sx9500_setup_interrupt(struct device *dev) { struct sx9500_data *data = dev->driver_data; struct device *gpio; #ifdef CONFIG_SX9500_TRIGGER_OWN_THREAD k_sem_init(&data->sem, 0, UINT_MAX); #else data->work.handler = sx9500_work_cb; data->dev = dev; #endif gpio = device_get_binding(CONFIG_SX9500_GPIO_CONTROLLER); if (!gpio) { SYS_LOG_DBG("sx9500: gpio controller %s not found", CONFIG_SX9500_GPIO_CONTROLLER); return -EINVAL; } gpio_pin_configure(gpio, CONFIG_SX9500_GPIO_PIN, GPIO_DIR_IN | GPIO_INT | GPIO_INT_EDGE | GPIO_INT_ACTIVE_LOW | GPIO_INT_DEBOUNCE); gpio_init_callback(&data->gpio_cb, sx9500_gpio_cb, BIT(CONFIG_SX9500_GPIO_PIN)); gpio_add_callback(gpio, &data->gpio_cb); gpio_pin_enable_callback(gpio, CONFIG_SX9500_GPIO_PIN); #ifdef CONFIG_SX9500_TRIGGER_OWN_THREAD k_thread_spawn(sx9500_thread_stack, CONFIG_SX9500_THREAD_STACK_SIZE, sx9500_thread_main, POINTER_TO_INT(dev), 0, NULL, K_PRIO_COOP(CONFIG_SX9500_THREAD_PRIORITY), 0, 0); #endif return 0; }
int ipm_console_receiver_init(struct device *d) { const struct ipm_console_receiver_config_info *config_info = d->config->config_info; struct ipm_console_receiver_runtime_data *driver_data = d->driver_data; struct device *ipm; ipm = device_get_binding(config_info->bind_to); if (!ipm) { printk("unable to bind IPM console receiver to '%s'\n", config_info->bind_to); return -EINVAL; } if (ipm_max_id_val_get(ipm) < 0xFF) { printk("IPM driver %s doesn't support 8-bit id values", config_info->bind_to); return -EINVAL; } driver_data->ipm_device = ipm; driver_data->channel_disabled = 0; k_sem_init(&driver_data->sem, 0, UINT_MAX); sys_ring_buf_init(&driver_data->rb, config_info->rb_size32, config_info->ring_buf_data); ipm_register_callback(ipm, ipm_console_receive_callback, d); k_thread_spawn(config_info->thread_stack, CONFIG_IPM_CONSOLE_STACK_SIZE, ipm_console_thread, d, NULL, NULL, K_PRIO_COOP(IPM_CONSOLE_PRI), 0, 0); ipm_set_enabled(ipm, 1); return 0; }
void main(void) { int rv; /* return value from tests */ volatile int error; /* used to create a divide by zero error */ TC_START("Starting static IDT tests"); TC_PRINT("Testing to see if IDT has address of test stubs()\n"); rv = idt_stub_test(); if (rv != TC_PASS) { goto done_tests; } TC_PRINT("Testing to see interrupt handler executes properly\n"); _trigger_isr_handler(); if (int_handler_executed == 0) { TC_ERROR("Interrupt handler did not execute\n"); rv = TC_FAIL; goto done_tests; } else if (int_handler_executed != 1) { TC_ERROR("Interrupt handler executed more than once! (%d)\n", int_handler_executed); rv = TC_FAIL; goto done_tests; } TC_PRINT("Testing to see exception handler executes properly\n"); /* * Use exc_handler_executed instead of 0 to prevent the compiler issuing a * 'divide by zero' warning. */ error = 32; /* avoid static checker uninitialized warnings */ error = error / exc_handler_executed; if (exc_handler_executed == 0) { TC_ERROR("Exception handler did not execute\n"); rv = TC_FAIL; goto done_tests; } else if (exc_handler_executed != 1) { TC_ERROR("Exception handler executed more than once! (%d)\n", exc_handler_executed); rv = TC_FAIL; goto done_tests; } /* * Start task to trigger the spurious interrupt handler */ TC_PRINT("Testing to see spurious handler executes properly\n"); k_thread_spawn(my_stack_area, MY_STACK_SIZE, idt_spur_task, NULL, NULL, NULL, MY_PRIORITY, 0, K_NO_WAIT); /* * The fiber/task should not run past where the spurious interrupt is * generated. Therefore spur_handler_aborted_thread should remain at 1. */ if (spur_handler_aborted_thread == 0) { TC_ERROR("Spurious handler did not execute as expected\n"); rv = TC_FAIL; goto done_tests; } done_tests: TC_END(rv, "%s - %s.\n", rv == TC_PASS ? PASS : FAIL, __func__); TC_END_REPORT(rv); }
/* Entry point: hand all the work to main_thread running cooperatively
 * on its own stack.
 */
void main(void)
{
	k_thread_spawn(&thread_stack[0], STACKSIZE,
		       (k_thread_entry_t)main_thread,
		       NULL, NULL, NULL, K_PRIO_COOP(7), 0, 0);
}
/** * @brief Entry point to timer tests * * This is the entry point to the CPU and thread tests. * * @return N/A */ void main(void) { int rv; /* return value from tests */ thread_detected_error = 0; thread_evidence = 0; TC_START("Test kernel CPU and thread routines"); TC_PRINT("Initializing kernel objects\n"); rv = kernel_init_objects(); if (rv != TC_PASS) { goto tests_done; } #ifdef HAS_POWERSAVE_INSTRUCTION TC_PRINT("Testing k_cpu_idle()\n"); rv = test_kernel_cpu_idle(0); if (rv != TC_PASS) { goto tests_done; } #ifndef CONFIG_ARM TC_PRINT("Testing k_cpu_atomic_idle()\n"); rv = test_kernel_cpu_idle(1); if (rv != TC_PASS) { goto tests_done; } #endif #endif TC_PRINT("Testing interrupt locking and unlocking\n"); rv = test_kernel_interrupts(irq_lock_wrapper, irq_unlock_wrapper, -1); if (rv != TC_PASS) { goto tests_done; } #ifdef TICK_IRQ /* Disable interrupts coming from the timer. */ TC_PRINT("Testing irq_disable() and irq_enable()\n"); rv = test_kernel_interrupts(irq_disable_wrapper, irq_enable_wrapper, TICK_IRQ); if (rv != TC_PASS) { goto tests_done; } #endif TC_PRINT("Testing some kernel context routines\n"); rv = test_kernel_ctx_task(); if (rv != TC_PASS) { goto tests_done; } TC_PRINT("Spawning a thread from a task\n"); thread_evidence = 0; k_thread_spawn(thread_stack1, THREAD_STACKSIZE, thread_entry, k_current_get(), NULL, NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, 0); if (thread_evidence != 1) { rv = TC_FAIL; TC_ERROR(" - thread did not execute as expected!\n"); goto tests_done; } /* * The thread ran, now wake it so it can test k_current_get and * k_is_in_isr. 
*/ TC_PRINT("Thread to test k_current_get() and " "k_is_in_isr()\n"); k_sem_give(&sem_thread); if (thread_detected_error != 0) { rv = TC_FAIL; TC_ERROR(" - failure detected in thread; " "thread_detected_error = %d\n", thread_detected_error); goto tests_done; } TC_PRINT("Thread to test k_yield()\n"); k_sem_give(&sem_thread); if (thread_detected_error != 0) { rv = TC_FAIL; TC_ERROR(" - failure detected in thread; " "thread_detected_error = %d\n", thread_detected_error); goto tests_done; } k_sem_give(&sem_thread); rv = test_timeout(); if (rv != TC_PASS) { goto tests_done; } tests_done: TC_END_RESULT(rv); TC_END_REPORT(rv); }
static int test_timeout(void) { struct timeout_order *data; s32_t timeout; int rv; int i; /* test k_busy_wait() */ TC_PRINT("Testing k_busy_wait()\n"); timeout = 20; /* in ms */ k_thread_spawn(timeout_stacks[0], THREAD_STACKSIZE, test_busy_wait, (void *)(intptr_t) timeout, NULL, NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, 0); rv = k_sem_take(&reply_timeout, timeout * 2); if (rv) { TC_ERROR(" *** task timed out waiting for " "k_busy_wait()\n"); return TC_FAIL; } /* test k_sleep() */ TC_PRINT("Testing k_sleep()\n"); timeout = 50; k_thread_spawn(timeout_stacks[0], THREAD_STACKSIZE, test_thread_sleep, (void *)(intptr_t) timeout, NULL, NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, 0); rv = k_sem_take(&reply_timeout, timeout * 2); if (rv) { TC_ERROR(" *** task timed out waiting for thread on " "k_sleep().\n"); return TC_FAIL; } /* test k_thread_spawn() without cancellation */ TC_PRINT("Testing k_thread_spawn() without cancellation\n"); for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { k_thread_spawn(timeout_stacks[i], THREAD_STACKSIZE, delayed_thread, (void *)i, NULL, NULL, K_PRIO_COOP(5), 0, timeouts[i].timeout); } for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { data = k_fifo_get(&timeout_order_fifo, 750); if (!data) { TC_ERROR (" *** timeout while waiting for delayed thread\n"); return TC_FAIL; } if (data->timeout_order != i) { TC_ERROR(" *** wrong delayed thread ran (got %d, " "expected %d)\n", data->timeout_order, i); return TC_FAIL; } TC_PRINT(" got thread (q order: %d, t/o: %d) as expected\n", data->q_order, data->timeout); } /* ensure no more thread fire */ data = k_fifo_get(&timeout_order_fifo, 750); if (data) { TC_ERROR(" *** got something unexpected in the fifo\n"); return TC_FAIL; } /* test k_thread_spawn() with cancellation */ TC_PRINT("Testing k_thread_spawn() with cancellations\n"); int cancellations[] = { 0, 3, 4, 6 }; int num_cancellations = ARRAY_SIZE(cancellations); int next_cancellation = 0; k_tid_t delayed_threads[NUM_TIMEOUT_THREADS]; for (i = 0; i < NUM_TIMEOUT_THREADS; 
i++) { k_tid_t id; id = k_thread_spawn(timeout_stacks[i], THREAD_STACKSIZE, delayed_thread, (void *)i, NULL, NULL, K_PRIO_COOP(5), 0, timeouts[i].timeout); delayed_threads[i] = id; } for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { int j; if (i == cancellations[next_cancellation]) { TC_PRINT(" cancelling " "[q order: %d, t/o: %d, t/o order: %d]\n", timeouts[i].q_order, timeouts[i].timeout, i); for (j = 0; j < NUM_TIMEOUT_THREADS; j++) { if (timeouts[j].timeout_order == i) { break; } } if (j < NUM_TIMEOUT_THREADS) { k_thread_cancel(delayed_threads[j]); ++next_cancellation; continue; } } data = k_fifo_get(&timeout_order_fifo, 2750); if (!data) { TC_ERROR (" *** timeout while waiting for delayed thread\n"); return TC_FAIL; } if (data->timeout_order != i) { TC_ERROR(" *** wrong delayed thread ran (got %d, " "expected %d)\n", data->timeout_order, i); return TC_FAIL; } TC_PRINT(" got (q order: %d, t/o: %d, t/o order %d) " "as expected\n", data->q_order, data->timeout, data->timeout_order); } if (num_cancellations != next_cancellation) { TC_ERROR(" *** wrong number of cancellations (expected %d, " "got %d\n", num_cancellations, next_cancellation); return TC_FAIL; } /* ensure no more thread fire */ data = k_fifo_get(&timeout_order_fifo, 750); if (data) { TC_ERROR(" *** got something unexpected in the fifo\n"); return TC_FAIL; } return TC_PASS; }
/** * * @brief Test the k_yield() routine * * This routine tests the k_yield() routine. It starts another thread * (thus also testing k_thread_spawn() and checks that behaviour of * k_yield() against the cases of there being a higher priority thread, * a lower priority thread, and another thread of equal priority. * * On error, it may set <thread_detected_error> to one of the following values: * 10 - helper thread ran prematurely * 11 - k_yield() did not yield to a higher priority thread * 12 - k_yield() did not yield to an equal prioirty thread * 13 - k_yield() yielded to a lower priority thread * * @return TC_PASS on success * @return TC_FAIL on failure */ static int test_k_yield(void) { k_tid_t self_thread_id; /* * Start a thread of higher priority. Note that since the new thread is * being started from a thread, it will not automatically switch to the * thread as it would if done from a task. */ self_thread_id = k_current_get(); thread_evidence = 0; k_thread_spawn(thread_stack2, THREAD_STACKSIZE, thread_helper, NULL, NULL, NULL, K_PRIO_COOP(THREAD_PRIORITY - 1), 0, 0); if (thread_evidence != 0) { /* ERROR! Helper spawned at higher */ thread_detected_error = 10; /* priority ran prematurely. */ return TC_FAIL; } /* * Test that the thread will yield to the higher priority helper. * <thread_evidence> is still 0. */ k_yield(); if (thread_evidence == 0) { /* ERROR! Did not yield to higher */ thread_detected_error = 11; /* priority thread. */ return TC_FAIL; } if (thread_evidence > 1) { /* ERROR! Helper did not yield to */ thread_detected_error = 12; /* equal priority thread. */ return TC_FAIL; } /* * Raise the priority of thread_entry(). Calling k_yield() should * not result in switching to the helper. */ k_thread_priority_set(self_thread_id, self_thread_id->base.prio - 1); k_yield(); if (thread_evidence != 1) { /* ERROR! Context switched to a lower */ thread_detected_error = 13; /* priority thread! */ return TC_FAIL; } /* * Block on <sem_thread>. 
This will allow the helper thread to * complete. The main task will wake this thread. */ k_sem_take(&sem_thread, K_FOREVER); return TC_PASS; }