/**
 * Variant of dq_init() for deque headers that live inside a memory-mapped
 * I/O region.  In this mode the slot buffer is an array in memory-mapped
 * space that must be located at a higher address than the deque header;
 * the caller supplies its position as a byte index relative to the start
 * of the header.
 *
 * @param deque_size Number of items the new deque can hold.
 * @param item_size  Size (maximum) of each stored item.
 * @param index      Offset, in bytes from the start of the header, of the
 *                   first byte of item storage in the mapped region.
 * @param header     Deque header (located in the mapped region) to set up.
 */
void dq_init_memmap(ushort deque_size, ushort item_size, size_t index, PDQHEADER header)
{
    /* Begin with the classic (heap-buffered) initialization... */

    dq_init(deque_size, item_size, header);

    /* ...then discard the heap buffer and switch to mapped storage. */

    free(header->dqbuff);
    header->dqbuff = NULL;      /* no heap buffer in memmapped mode */
    header->memmapped = 1;      /* flag this header as memory mapped */
    header->dqbuffx = index;    /* item storage offset from header start */
}
/* Bring up the semaphore facility: the named-semaphore queue and the
 * holder records that support priority inheritance.
 */
void sem_initialize(void)
{
  /* Reset the queue of named semaphores to empty */

  dq_init(&g_nsems);

  /* Set up holder structures needed for priority inheritance */

  sem_initholders();
}
void netlink_initialize(void) { int i; /* Initialize the queues */ dq_init(&g_free_netlink_connections); dq_init(&g_active_netlink_connections); nxsem_init(&g_free_sem, 0, 1); for (i = 0; i < CONFIG_NET_NETLINK_CONNS; i++) { FAR struct netlink_conn_s *conn = &g_netlink_connections[i]; /* Mark the connection closed and move it to the free list */ memset(conn, 0, sizeof(*conn)); dq_addlast(&conn->node, &g_free_netlink_connections); } }
/* One-time setup of the UDP connection pool: reset the queues, create
 * the pool semaphore, mark every pre-allocated connection closed and
 * move it to the free list, and seed the ephemeral port counter.
 */
void uip_udpinit(void)
{
  int ndx;

  /* Reset the free and active connection queues */

  dq_init(&g_free_udp_connections);
  dq_init(&g_active_udp_connections);

  /* Semaphore guarding access to the free list */

  sem_init(&g_free_sem, 0, 1);

  for (ndx = 0; ndx < CONFIG_NET_UDP_CONNS; ndx++)
    {
      /* A zero local port marks the connection closed; queue it as free */

      g_udp_connections[ndx].lport = 0;
      dq_addlast(&g_udp_connections[ndx].node, &g_free_udp_connections);
    }

  /* First ephemeral local port to hand out */

  g_last_udp_port = 1024;
}
/* One-time setup of the TCP connection pool: reset the queues, mark
 * every pre-allocated connection closed and move it to the free list,
 * and seed the ephemeral port counter.
 */
void uip_tcpinit(void)
{
  int ndx;

  /* Reset the free and active connection queues */

  dq_init(&g_free_tcp_connections);
  dq_init(&g_active_tcp_connections);

  /* Now set up each connection structure */

  for (ndx = 0; ndx < CONFIG_NET_TCP_CONNS; ndx++)
    {
      /* Mark the connection closed and queue it on the free list */

      g_tcp_connections[ndx].tcpstateflags = UIP_CLOSED;
      dq_addlast(&g_tcp_connections[ndx].node, &g_free_tcp_connections);
    }

  /* First ephemeral local port to hand out */

  g_last_tcp_port = 1024;
}
/* * Parse a file path and return a structure containing its * components. * * This function takes a file path (e.g. /var/tmp/myexample.txt) * and produces a structure containing points to the path elements. * * typedef struct _PATHINFO_STRUCT { * char* fullpath; * char* filename; * char* dirpath; * char* extension; * int isdir; * } PATHINFO_STRUCT; * * The PATHINFO_STRUCT structure is allocated from the heap, as are * the strings pointed two by its members. This memory must be * returned to the heap by calling pathinfo_release, passing a pointer * to the PATHINFO_STRUCT returned by pathinfo_parse. */ PATHINFO_STRUCT* pathinfo_parse_filepath(const char* pathname) { DQHEADER deque; PATHINFO_STRUCT* pathinfop; char* pathdelim = "/"; char* pathbuff; char* tokenp = NULL; char* token_aheadp = NULL; int isdir = 0; // Get a pathinfo structure pathinfop = calloc(1, sizeof(PATHINFO_STRUCT)); pathinfop->fullpath = strdup(pathname); // Initialize a plenty big deque to hold path components. dq_init(strlen(pathname) + 1, sizeof(char), &deque); // Break the file path into tokens. Copy to a working buffer // so we don't alter path with calls to strtok pathbuff = strdup(pathname); // Check for leading / if (pathbuff[0] == '/') { push_token(&deque, "/"); } isdir = pathinfo_is_dir(pathname); pathinfop->isdir = isdir; // Get first token and look ahead token tokenp = strtok(pathbuff, pathdelim); token_aheadp = strtok(NULL, pathdelim); while (tokenp != NULL) { if (token_aheadp != NULL) { // More tokens in path string push_token(&deque, tokenp); push_token(&deque, "/"); tokenp = token_aheadp; token_aheadp = strtok(NULL, pathdelim); } else { // tokenp is the last token in the sequence if (!isdir) { parse_filename(pathinfop, tokenp); } else { push_token(&deque, tokenp); } tokenp = NULL; } } set_dirpath(pathinfop, &deque); free(pathbuff); return pathinfop; }
void aio_initialize(void) { int i; /* Initialize counting semaphores */ (void)sem_init(&g_aioc_freesem, 0, CONFIG_FS_NAIOC); (void)sem_init(&g_aio_exclsem, 0, 1); g_aio_holder = INVALID_PROCESS_ID; /* Initialize the container queues */ dq_init(&g_aioc_free); dq_init(&g_aio_pending); /* Add all of the pre-allocated AIO containers to the free list */ for (i = 0; i < CONFIG_FS_NAIOC; i++) { /* Add the container to the free list */ dq_addlast(&g_aioc_alloc[i].aioc_link, &g_aioc_free); } }
int work_lpstart(void) { pid_t pid; int wndx; /* Initialize work queue data structures */ memset(&g_lpwork, 0, sizeof(struct kwork_wqueue_s)); g_lpwork.delay = CONFIG_SCHED_LPWORKPERIOD / USEC_PER_TICK; dq_init(&g_lpwork.q); /* Don't permit any of the threads to run until we have fully initialized * g_lpwork. */ sched_lock(); /* Start the low-priority, kernel mode worker thread(s) */ sinfo("Starting low-priority kernel worker thread(s)\n"); for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++) { pid = kernel_thread(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY, CONFIG_SCHED_LPWORKSTACKSIZE, (main_t)work_lpthread, (FAR char * const *)NULL); DEBUGASSERT(pid > 0); if (pid < 0) { int errcode = errno; DEBUGASSERT(errcode > 0); serr("ERROR: kernel_thread %d failed: %d\n", wndx, errcode); sched_unlock(); return -errcode; } g_lpwork.worker[wndx].pid = pid; g_lpwork.worker[wndx].busy = true; } sched_unlock(); return g_lpwork.worker[0].pid; }
MavlinkFTP::MavlinkFTP()
{
	// set up the free list of work requests and its lock
	dq_init(&_workFree);
	sem_init(&_lock, 0, 1);

	// no sessions are open initially
	for (size_t i = 0; i < kMaxSession; ++i) {
		_session_fds[i] = -1;
	}

	// seed the free list with every pre-allocated work buffer
	for (unsigned i = 0; i < kRequestQueueSize; ++i) {
		_qFree(&_workBufs[i]);
	}
}
/* Exercise the deque: push/pop (front), inject/eject (back), popping
 * and ejecting from an empty deque, and a mixed front/back sequence.
 */
int main()
{
    deque *d = dq_init();
    int v;

    printf("---push&pop test---\n");
    for (v = 1; v <= 3; v++) {
        print_push(v, d);
    }
    for (v = 0; v < 3; v++) {
        print_pop(d);
    }

    printf("---pop empty test---\n");
    print_pop(d);

    printf("---inject&eject test---\n");
    for (v = 1; v <= 3; v++) {
        print_inject(v, d);
    }
    for (v = 0; v < 3; v++) {
        print_eject(d);
    }

    printf("---eject empty test---\n");
    print_eject(d);

    printf("---mix test---\n");
    for (v = 1; v <= 3; v++) {
        print_push(v, d);
    }
    for (v = 0; v < 3; v++) {
        print_eject(d);
    }
    for (v = 1; v <= 3; v++) {
        print_inject(v, d);
    }
    for (v = 0; v < 3; v++) {
        print_pop(d);
    }

    return 0;
}
MavlinkFTP::MavlinkFTP() :
	_request_bufs{},
	_request_queue{},
	_request_queue_sem{},
	_utRcvMsgFunc{},
	_ftp_test{}
{
	// set up the request free list and its lock
	dq_init(&_request_queue);
	sem_init(&_request_queue_sem, 0, 1);

	// no sessions are open initially
	for (size_t i = 0; i < kMaxSession; ++i) {
		_session_fds[i] = -1;
	}

	// seed the free list with every pre-allocated request buffer
	for (unsigned i = 0; i < kRequestQueueSize; ++i) {
		_return_request(&_request_bufs[i]);
	}
}
/* One-time setup of the local (Unix-domain) socket layer.  Currently
 * this only resets the queue of listening stream sockets, and only when
 * stream support is configured.
 */
void local_initialize(void)
{
#ifdef CONFIG_NET_LOCAL_STREAM
  /* Reset the list of listening local stream sockets */

  dq_init(&g_local_listeners);
#endif
}
{ "q2-rm-ratio", 226, "<NUM>", 0, "Number of items inserted into queue 2 on a insertion", 3}, { "q3-rm-ratio", 227, "<NUM>", 0, "Number of items inserted into queue 3 on a insertion", 3}, { "q4-rm-ratio", 228, "<NUM>", 0, "Number of items inserted into queue 4 on a insertion", 3}, { "computation-load", 224, "<NUM>", 0, "Highest number of random number from 0 to N (0-24). Simulates computation.\ Table of load times is in root folder of project.", 3}, { 0 } }; struct argp argp = { options, parse_opt, 0, doc }; int arg_lb_count = 4; argp_parse(&argp, argc, argv, 0, 0, &arg_lb_count); atomic_init(&finished, 0); atomic_init(&np, 0); atomic_init(&nc, 0); pthread_t *cb_threads = dq_init(work, NULL, sizeof(int), queue_count_arg, TWO_TO_ONE, load_balance_thread_arg, local_lb_threshold_percent, local_lb_threshold_static, threshold_type_arg, local_balance_type_arg, hook, max_qsize); for (int i = 0; i < (queue_count_arg * TWO_TO_ONE); i++ ) { pthread_join(cb_threads[i], NULL); } dq_destroy(); pthread_barrier_destroy(&barrier); free(q_ins_ratios); free(q_rm_ratios); printf("Main finished\n"); return 0; }
/* OS entry point: initializes all RTOS facilities in strict dependency
 * order, sets up the IDLE task (pid 0, priority 0), brings up the system,
 * and then becomes the IDLE loop.  This function never returns.
 */
void os_start(void)
{
  int i;

  slldbg("Entry\n");

  /* Initialize all task lists */

  dq_init(&g_readytorun);
  dq_init(&g_pendingtasks);
  dq_init(&g_waitingforsemaphore);
#ifndef CONFIG_DISABLE_SIGNALS
  dq_init(&g_waitingforsignal);
#endif
#ifndef CONFIG_DISABLE_MQUEUE
  dq_init(&g_waitingformqnotfull);
  dq_init(&g_waitingformqnotempty);
#endif
#ifdef CONFIG_PAGING
  dq_init(&g_waitingforfill);
#endif
  dq_init(&g_inactivetasks);
  sq_init(&g_delayeddeallocations);

  /* Initialize the logic that determine unique process IDs. */

  g_lastpid = 0;
  for (i = 0; i < CONFIG_MAX_TASKS; i++)
    {
      g_pidhash[i].tcb = NULL;
      g_pidhash[i].pid = INVALID_PROCESS_ID;
    }

  /* Assign the process ID of ZERO to the idle task */

  g_pidhash[ PIDHASH(0)].tcb = &g_idletcb;
  g_pidhash[ PIDHASH(0)].pid = 0;

  /* Initialize a TCB for this thread of execution.  NOTE: The default
   * value for most components of the g_idletcb are zero.  The entire
   * structure is set to zero.  Then only the (potentially) non-zero
   * elements are initialized.  NOTE:  The idle task is the only task in
   * that has pid == 0 and sched_priority == 0.
   */

  bzero((void*)&g_idletcb, sizeof(_TCB));
  g_idletcb.task_state = TSTATE_TASK_RUNNING;
  g_idletcb.entry.main = (main_t)os_start;

#if CONFIG_TASK_NAME_SIZE > 0
  strncpy(g_idletcb.name, g_idlename, CONFIG_TASK_NAME_SIZE-1);
  g_idletcb.argv[0] = g_idletcb.name;
#else
  g_idletcb.argv[0] = (char*)g_idlename;
#endif /* CONFIG_TASK_NAME_SIZE */

  /* Then add the idle task's TCB to the head of the ready to run list */

  dq_addfirst((FAR dq_entry_t*)&g_idletcb, (FAR dq_queue_t*)&g_readytorun);

  /* Initialize the processor-specific portion of the TCB */

  g_idletcb.flags = TCB_FLAG_TTYPE_KERNEL;
  up_initial_state(&g_idletcb);

  /* Initialize the semaphore facility (if in link).  This has to be done
   * very early because many subsystems depend upon fully functional
   * semaphores.
   */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sem_initialize != NULL)
#endif
    {
      sem_initialize();
    }

  /* Initialize the memory manager */

#ifndef CONFIG_HEAP_BASE
  {
    FAR void *heap_start;
    size_t heap_size;

    /* Heap geometry comes from the platform-specific code */

    up_allocate_heap(&heap_start, &heap_size);
    kmm_initialize(heap_start, heap_size);
  }
#else
  kmm_initialize((void*)CONFIG_HEAP_BASE, CONFIG_HEAP_SIZE);
#endif

  /* Initialize the interrupt handling subsystem (if included) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (irq_initialize != NULL)
#endif
    {
      irq_initialize();
    }

  /* Initialize the watchdog facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (wd_initialize != NULL)
#endif
    {
      wd_initialize();
    }

  /* Initialize the POSIX timer facility (if included in the link) */

#ifndef CONFIG_DISABLE_CLOCK
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (clock_initialize != NULL)
#endif
    {
      clock_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_POSIX_TIMERS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (timer_initialize != NULL)
#endif
    {
      timer_initialize();
    }
#endif

  /* Initialize the signal facility (if in link) */

#ifndef CONFIG_DISABLE_SIGNALS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sig_initialize != NULL)
#endif
    {
      sig_initialize();
    }
#endif

  /* Initialize the named message queue facility (if in link) */

#ifndef CONFIG_DISABLE_MQUEUE
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (mq_initialize != NULL)
#endif
    {
      mq_initialize();
    }
#endif

  /* Initialize the thread-specific data facility (if in link) */

#ifndef CONFIG_DISABLE_PTHREAD
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (pthread_initialize != NULL)
#endif
    {
      pthread_initialize();
    }
#endif

  /* Initialize the file system (needed to support device drivers) */

#if CONFIG_NFILE_DESCRIPTORS > 0
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (fs_initialize != NULL)
#endif
    {
      fs_initialize();
    }
#endif

  /* Initialize the network system */

#ifdef CONFIG_NET
#if 0
  if (net_initialize != NULL)
#endif
    {
      net_initialize();
    }
#endif

  /* The processor specific details of running the operating system
   * will be handled here.  Such things as setting up interrupt
   * service routines and starting the clock are some of the things
   * that are different for each processor and hardware platform.
   */

  up_initialize();

  /* Initialize the C libraries (if included in the link).  This
   * is done last because the libraries may depend on the above.
   */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (lib_initialize != NULL)
#endif
    {
      lib_initialize();
    }

  /* Create stdout, stderr, stdin on the IDLE task.  These will be
   * inherited by all of the threads created by the IDLE task.
   */

  (void)sched_setupidlefiles(&g_idletcb);

  /* Create initial tasks and bring-up the system */

  (void)os_bringup();

  /* When control is returned to this point, the system is idle. */

  sdbg("Beginning Idle Loop\n");
  for (;;)
    {
      /* Perform garbage collection (if it is not being done by the worker
       * thread).  This cleans-up memory de-allocations that were queued
       * because they could not be freed in that execution context (for
       * example, if the memory was freed from an interrupt handler).
       */

#ifndef CONFIG_SCHED_WORKQUEUE
      /* We must have exclusive access to the memory manager to do this
       * BUT the idle task cannot wait on a semaphore.  So we only do
       * the cleanup now if we can get the semaphore -- this should be
       * possible because if the IDLE thread is running, no other task is!
       */

      if (kmm_trysemaphore() == 0)
        {
          sched_garbagecollection();
          kmm_givesemaphore();
        }
#endif

      /* Perform any processor-specific idle state operations */

      up_idle();
    }
}
/* OS entry point (later NuttX variant with task groups and split heaps):
 * initializes all RTOS facilities in strict dependency order, sets up the
 * IDLE task (pid 0, priority 0) and its group, brings up the system, and
 * then becomes the IDLE loop.  This function never returns.
 */
void os_start(void)
{
  int i;

  slldbg("Entry\n");

  /* Initialize RTOS Data ***************************************************/
  /* Initialize all task lists */

  dq_init(&g_readytorun);
  dq_init(&g_pendingtasks);
  dq_init(&g_waitingforsemaphore);
#ifndef CONFIG_DISABLE_SIGNALS
  dq_init(&g_waitingforsignal);
#endif
#ifndef CONFIG_DISABLE_MQUEUE
  dq_init(&g_waitingformqnotfull);
  dq_init(&g_waitingformqnotempty);
#endif
#ifdef CONFIG_PAGING
  dq_init(&g_waitingforfill);
#endif
  dq_init(&g_inactivetasks);
  sq_init(&g_delayed_kufree);
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
    defined(CONFIG_MM_KERNEL_HEAP)
  sq_init(&g_delayed_kfree);
#endif

  /* Initialize the logic that determine unique process IDs. */

  g_lastpid = 0;
  for (i = 0; i < CONFIG_MAX_TASKS; i++)
    {
      g_pidhash[i].tcb = NULL;
      g_pidhash[i].pid = INVALID_PROCESS_ID;
    }

  /* Assign the process ID of ZERO to the idle task */

  g_pidhash[PIDHASH(0)].tcb = &g_idletcb.cmn;
  g_pidhash[PIDHASH(0)].pid = 0;

  /* Initialize the IDLE task TCB *******************************************/
  /* Initialize a TCB for this thread of execution.  NOTE: The default
   * value for most components of the g_idletcb are zero.  The entire
   * structure is set to zero.  Then only the (potentially) non-zero
   * elements are initialized.  NOTE:  The idle task is the only task in
   * that has pid == 0 and sched_priority == 0.
   */

  bzero((void*)&g_idletcb, sizeof(struct task_tcb_s));
  g_idletcb.cmn.task_state = TSTATE_TASK_RUNNING;
  g_idletcb.cmn.entry.main = (main_t)os_start;
  g_idletcb.cmn.flags      = TCB_FLAG_TTYPE_KERNEL;

  /* Set the IDLE task name */

#if CONFIG_TASK_NAME_SIZE > 0
  strncpy(g_idletcb.cmn.name, g_idlename, CONFIG_TASK_NAME_SIZE);
  g_idletcb.cmn.name[CONFIG_TASK_NAME_SIZE] = '\0';
#endif /* CONFIG_TASK_NAME_SIZE */

  /* Configure the task name in the argument list.  The IDLE task does
   * not really have an argument list, but this name is still useful
   * for things like the NSH PS command.
   *
   * In the kernel mode build, the arguments are saved on the task's stack
   * and there is no support that yet.
   */

#if CONFIG_TASK_NAME_SIZE > 0
  g_idleargv[0] = g_idletcb.cmn.name;
#else
  g_idleargv[0] = (FAR char *)g_idlename;
#endif /* CONFIG_TASK_NAME_SIZE */
  g_idleargv[1] = NULL;
  g_idletcb.argv = g_idleargv;

  /* Then add the idle task's TCB to the head of the ready to run list */

  dq_addfirst((FAR dq_entry_t*)&g_idletcb, (FAR dq_queue_t*)&g_readytorun);

  /* Initialize the processor-specific portion of the TCB */

  up_initial_state(&g_idletcb.cmn);

  /* Initialize RTOS facilities *********************************************/
  /* Initialize the semaphore facility.  This has to be done very early
   * because many subsystems depend upon fully functional semaphores.
   */

  sem_initialize();

#if defined(MM_KERNEL_USRHEAP_INIT) || defined(CONFIG_MM_KERNEL_HEAP) || defined(CONFIG_MM_PGALLOC)
  /* Initialize the memory manager */

  {
    FAR void *heap_start;
    size_t heap_size;

#ifdef MM_KERNEL_USRHEAP_INIT
    /* Get the user-mode heap from the platform specific code and configure
     * the user-mode memory allocator.
     */

    up_allocate_heap(&heap_start, &heap_size);
    kumm_initialize(heap_start, heap_size);
#endif

#ifdef CONFIG_MM_KERNEL_HEAP
    /* Get the kernel-mode heap from the platform specific code and configure
     * the kernel-mode memory allocator.
     */

    up_allocate_kheap(&heap_start, &heap_size);
    kmm_initialize(heap_start, heap_size);
#endif

#ifdef CONFIG_MM_PGALLOC
    /* If there is a page allocator in the configuration, then get the page
     * heap information from the platform-specific code and configure the
     * page allocator.
     */

    up_allocate_pgheap(&heap_start, &heap_size);
    mm_pginitialize(heap_start, heap_size);
#endif
  }
#endif

#if defined(CONFIG_SCHED_HAVE_PARENT) && defined(CONFIG_SCHED_CHILD_STATUS)
  /* Initialize tasking data structures */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (task_initialize != NULL)
#endif
    {
      task_initialize();
    }
#endif

  /* Initialize the interrupt handling subsystem (if included) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (irq_initialize != NULL)
#endif
    {
      irq_initialize();
    }

  /* Initialize the watchdog facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (wd_initialize != NULL)
#endif
    {
      wd_initialize();
    }

  /* Initialize the POSIX timer facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (clock_initialize != NULL)
#endif
    {
      clock_initialize();
    }

#ifndef CONFIG_DISABLE_POSIX_TIMERS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (timer_initialize != NULL)
#endif
    {
      timer_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_SIGNALS
  /* Initialize the signal facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sig_initialize != NULL)
#endif
    {
      sig_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_MQUEUE
  /* Initialize the named message queue facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (mq_initialize != NULL)
#endif
    {
      mq_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_PTHREAD
  /* Initialize the thread-specific data facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (pthread_initialize != NULL)
#endif
    {
      pthread_initialize();
    }
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Initialize the file system (needed to support device drivers) */

  fs_initialize();
#endif

#ifdef CONFIG_NET
  /* Initialize the networking system.  Network initialization is
   * performed in two steps: (1) net_setup() initializes static
   * configuration of the network support.  This must be done prior
   * to registering network drivers by up_initialize().  This step
   * cannot depend upon any hardware-dependent features such as
   * timers or interrupts.
   */

  net_setup();
#endif

  /* The processor specific details of running the operating system
   * will be handled here.  Such things as setting up interrupt
   * service routines and starting the clock are some of the things
   * that are different for each processor and hardware platform.
   */

  up_initialize();

#ifdef CONFIG_NET
  /* Complete initialization the networking system now that interrupts
   * and timers have been configured by up_initialize().
   */

  net_initialize();
#endif

#ifdef CONFIG_MM_SHM
  /* Initialize shared memory support */

  shm_initialize();
#endif

  /* Initialize the C libraries.  This is done last because the libraries
   * may depend on the above.
   */

  lib_initialize();

  /* IDLE Group Initialization **********************************************/
#ifdef HAVE_TASK_GROUP
  /* Allocate the IDLE group */

  DEBUGVERIFY(group_allocate(&g_idletcb, g_idletcb.cmn.flags));
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  /* Create stdout, stderr, stdin on the IDLE task.  These will be
   * inherited by all of the threads created by the IDLE task.
   */

  DEBUGVERIFY(group_setupidlefiles(&g_idletcb));
#endif

#ifdef HAVE_TASK_GROUP
  /* Complete initialization of the IDLE group.  Suppress retention
   * of child status in the IDLE group.
   */

  DEBUGVERIFY(group_initialize(&g_idletcb));
  g_idletcb.cmn.group->tg_flags = GROUP_FLAG_NOCLDWAIT;
#endif

  /* Bring Up the System ****************************************************/
  /* Create initial tasks and bring-up the system */

  DEBUGVERIFY(os_bringup());

  /* The IDLE Loop **********************************************************/
  /* When control is returned to this point, the system is idle. */

  sdbg("Beginning Idle Loop\n");
  for (;;)
    {
      /* Perform garbage collection (if it is not being done by the worker
       * thread).  This cleans-up memory de-allocations that were queued
       * because they could not be freed in that execution context (for
       * example, if the memory was freed from an interrupt handler).
       */

#ifndef CONFIG_SCHED_WORKQUEUE
      /* We must have exclusive access to the memory manager to do this
       * BUT the idle task cannot wait on a semaphore.  So we only do
       * the cleanup now if we can get the semaphore -- this should be
       * possible because if the IDLE thread is running, no other task is!
       *
       * WARNING: This logic could have undesirable side-effects if priority
       * inheritance is enabled.  Imagine the possible issues if the
       * priority of the IDLE thread were to get boosted!  Moral: If you
       * use priority inheritance, then you should also enable the work
       * queue so that is done in a safer context.
       */

      if (kmm_trysemaphore() == 0)
        {
          sched_garbagecollection();
          kmm_givesemaphore();
        }
#endif

      /* Perform any processor-specific idle state operations */

      up_idle();
    }
}
int work_usrstart(void) { /* Initialize work queue data structures */ g_usrwork.delay = CONFIG_LIB_USRWORKPERIOD / USEC_PER_TICK; dq_init(&g_usrwork.q); #ifdef CONFIG_BUILD_PROTECTED { /* Set up the work queue lock */ (void)sem_init(&g_usrsem, 0, 1); /* Start a user-mode worker thread for use by applications. */ g_usrwork.pid = task_create("uwork", CONFIG_LIB_USRWORKPRIORITY, CONFIG_LIB_USRWORKSTACKSIZE, (main_t)work_usrthread, (FAR char * const *)NULL); DEBUGASSERT(g_usrwork.pid > 0); if (g_usrwork.pid < 0) { int errcode = errno; DEBUGASSERT(errcode > 0); return -errcode; } return g_usrwork.pid; } #else { pthread_t usrwork; pthread_attr_t attr; struct sched_param param; int ret; /* Set up the work queue lock */ (void)pthread_mutex_init(&g_usrmutex, NULL); /* Start a user-mode worker thread for use by applications. */ (void)pthread_attr_init(&attr); (void)pthread_attr_setstacksize(&attr, CONFIG_LIB_USRWORKSTACKSIZE); #ifdef CONFIG_SCHED_SPORADIC /* Get the current sporadic scheduling parameters. Those will not be * modified. */ ret = set_getparam(pid, ¶m); if (ret < 0) { int erroode = get_errno(); return -errcode; } #endif param.sched_priority = CONFIG_LIB_USRWORKPRIORITY; (void)pthread_attr_setschedparam(&attr, ¶m); ret = pthread_create(&usrwork, &attr, work_usrthread, NULL); if (ret != 0) { return -ret; } /* Detach because the return value and completion status will not be * requested. */ (void)pthread_detach(usrwork); g_usrwork.pid = (pid_t)usrwork; return g_usrwork.pid; } #endif }
static int process_switch_testa() { char fpathbuff[512]; int status = 0; int recx = 0; char* strdir = NULL; MMFOR_HANDLE* mmfhp = NULL; TESTA_REC* recp = NULL; DQHEADER deque; status = 1; // Construct deque to hold TA_REC for readback test dq_init(TA_NRECS, sizeof(TESTA_REC), &deque); if (cmdarg_fetch_switch(NULL, "a")) { fprintf(stdout, "TEST-A: File of Records Test\n"); strdir = cmdarg_fetch_string(NULL, "d"); if (NULL == strdir) { fprintf(stdout, "TEST-A: Target directory not provided in cmd args\n"); exit(1); } sprintf(fpathbuff,"%s/TEST-A.DAT",strdir); fprintf(stdout, "Writing file %s\n", fpathbuff); mmfhp = mmfor_create(fpathbuff, MMA_READ_WRITE, MMF_SHARED, 0660, sizeof(TESTA_REC), TA_NRECS); for (recx = 0; recx < TA_NRECS; recx++) { char txtbuff[sizeof(recp->data)]; recp = (TESTA_REC*)mmfor_x2p(mmfhp, recx); if (NULL == recp) { fprintf(stdout, "TEST-A Fails! NULL pointer from mmfor_x2p at index %d\n", recx); exit(1); } sprintf(txtbuff, "REC-%d", recx); write_testa_rec(recp, txtbuff, recx); fprintf(stdout, "Data: [%s]\n", recp->data); dq_abd(&deque, recp); } fprintf(stdout, "\nWRITE TEST COMPLETE\n"); fprintf(stdout, "Closing test file %s\n", fpathbuff); mmfor_close(mmfhp); fprintf(stdout, "Re-opening test file %s\n", fpathbuff); mmfhp = NULL; mmfhp = mmfor_open(fpathbuff, MMA_READ_WRITE, MMF_SHARED); if (NULL == mmfhp) { fprintf(stdout, "TEST-A: mmfor_open fails!\n"); exit(1); } for (recx= 0; recx < TA_NRECS; recx++) { TESTA_REC xrec; dq_rtd(&deque, &xrec); recp = (TESTA_REC*)mmfor_x2p(mmfhp, recx); if (cmp_testa_rec(&xrec, recp)) { fprintf(stdout, "TEST-A: Readback compare fails at record %d\n", recx); exit(1); } fprintf(stdout, "Read Data: [%s]\n", recp->data); } dq_close(&deque); fprintf(stdout, "TEST-A: Completed for %d records\n", TA_NRECS); return status; } return status; }