static void *ui_shadow_eventcb(int event, void *data) { struct ui_resource_holder *rh; switch (event) { case XNSHADOW_CLIENT_ATTACH: rh = xnarch_alloc_host_mem(sizeof(*rh)); if (!rh) return ERR_PTR(-ENOMEM); initq(&rh->semq); initq(&rh->flgq); initq(&rh->mbxq); return &rh->ppd; case XNSHADOW_CLIENT_DETACH: rh = ppd2rholder((xnshadow_ppd_t *) data); ui_sem_flush_rq(&rh->semq); ui_flag_flush_rq(&rh->flgq); ui_mbx_flush_rq(&rh->mbxq); xnarch_free_host_mem(rh, sizeof(*rh)); return NULL; } return ERR_PTR(-EINVAL); }
int xnpipe_mount(void) { struct xnpipe_state *state; int i; for (state = &xnpipe_states[0]; state < &xnpipe_states[XNPIPE_NDEVS]; state++) { inith(&state->slink); inith(&state->alink); state->status = 0; state->asyncq = NULL; initq(&state->inq); initq(&state->outq); } initq(&xnpipe_sleepq); initq(&xnpipe_asyncq); xnpipe_class = class_create(THIS_MODULE, "rtpipe"); if (IS_ERR(xnpipe_class)) { xnlogerr("error creating rtpipe class, err=%ld.\n", PTR_ERR(xnpipe_class)); return -EBUSY; } for (i = 0; i < XNPIPE_NDEVS; i++) { DECLARE_DEVHANDLE(cldev); cldev = wrap_device_create(xnpipe_class, NULL, MKDEV(XNPIPE_DEV_MAJOR, i), NULL, "rtp%d", i); if (IS_ERR(cldev)) { xnlogerr ("can't add device class, major=%d, minor=%d, err=%ld\n", XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev)); class_destroy(xnpipe_class); return -EBUSY; } } if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) { xnlogerr ("unable to reserve major #%d for message pipe support.\n", XNPIPE_DEV_MAJOR); return -EBUSY; } xnpipe_wakeup_apc = rthal_apc_alloc("pipe_wakeup", &xnpipe_wakeup_proc, NULL); return 0; }
int main(int argc, char *argv[]) { FILE* input = NULL; Queue *source = initq(); int option; while ((option = getopt(argc, argv, "hv")) != EOF) { switch (option) { case 'h': usage(); break; case 'v': printf("bf version %s\nCopyright (c) %s, %s\nX11 license\n", VERSION, YEAR, AUTHOR); exit(0); break; default: usage(); break; } } if (argv[optind] == NULL) usage(); else if ((input = fopen(argv[1], "r")) == NULL) { error(NO_FILE, "main"); } else { readInput(input, source); fclose(input); execute(source); delq(source); } return 0; }
/*
 * Readers/writers demo driver: set up the run queue and semaphores
 * (r_sem/w_sem start closed, mutex open), spawn five reader and three
 * writer threads, then hand control to the scheduler.
 */
int main()
{
	int i;

	initq(&RunQ);

	r_sem = CreateSem(0);
	w_sem = CreateSem(0);
	mutex = CreateSem(1);

	for (i = 0; i < 5; i++)
		start_thread(reader);
	for (i = 0; i < 3; i++)
		start_thread(writer);

	run();
}
/*
 * Propagate a wallclock offset change of `delta` nanoseconds to every
 * armed aperiodic timer flagged XNTIMER_REALTIME, on all online CPUs,
 * then reprogram the next shot on each CPU.
 */
void xntimer_adjust_all_aperiodic(xnsticks_t delta)
{
	unsigned cpu, nr_cpus;
	xnqueue_t adjq;

	initq(&adjq);
	/* Timer queues hold TSC values; convert the offset once. */
	delta = xnarch_ns_to_tsc(delta);
	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		xnsched_t *sched = xnpod_sched_slot(cpu);
		xntimerq_t *q = &sched->timerqueue;
		xnholder_t *adjholder;
		xntimerh_t *holder;
		xntimerq_it_t it;

		/* Pass 1: collect matching timers on a side queue; we must
		   not requeue entries while iterating the timer queue. */
		for (holder = xntimerq_it_begin(q, &it); holder;
		     holder = xntimerq_it_next(q, &it, holder)) {
			xntimer_t *timer = aplink2timer(holder);
			if (testbits(timer->status, XNTIMER_REALTIME)) {
				inith(&timer->adjlink);
				appendq(&adjq, &timer->adjlink);
			}
		}

		/* Pass 2: dequeue each collected timer and re-arm it with
		   the shifted expiry date. */
		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_aperiodic(timer);
			xntimer_adjust_aperiodic(timer, delta);
		}

		/* Reprogram the hardware shot on the owning CPU. */
		if (sched != xnpod_current_sched())
			xntimer_next_remote_shot(sched);
		else
			xntimer_next_local_shot(sched);
	}
}
/*
 * Propagate a clock adjustment of `delta` ticks to every armed periodic
 * timer flagged XNTIMER_REALTIME in this slave time base, across the
 * per-CPU timer wheels of all online CPUs.
 */
void xntslave_adjust(xntslave_t *slave, xnsticks_t delta)
{
	int nr_cpus, cpu, n;
	xnqueue_t adjq;

	initq(&adjq);

	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		struct percpu_cascade *pc = &slave->cascade[cpu];
		xnholder_t *adjholder;

		/* Pass 1: scan every wheel slot and park matching timers
		   on a side queue; requeuing while walking the slot lists
		   would corrupt the scan. */
		for (n = 0; n < XNTIMER_WHEELSIZE; n++) {
			xnqueue_t *q = &pc->wheel[n];
			xntlholder_t *holder;

			for (holder = xntlist_head(q); holder;
			     holder = xntlist_next(q, holder)) {
				xntimer_t *timer = plink2timer(holder);
				if (testbits(timer->status, XNTIMER_REALTIME)) {
					inith(&timer->adjlink);
					appendq(&adjq, &timer->adjlink);
				}
			}
		}

		/* Pass 2: dequeue each collected timer and re-arm it with
		   the shifted expiry date. */
		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_periodic(timer);
			xntimer_adjust_periodic(timer, delta);
		}
	}
}
/*
 * Breadth First Search.
 *
 * Visits (prints) the tree rooted at `r` in level order, marking each
 * visited node via its `count` flag, and returns the first node whose
 * ->value equals `value`, or NULL when absent (or r is NULL).
 *
 * BFS: 1. Visit r  2. Enqueue r  3. Dequeue until queue is empty
 *      4. For every dequeued node, visit and enqueue its adjacent nodes.
 */
elem_t *BFS(elem_t *r, int value)
{
	elem_t *temp, *ret = NULL;
	q_t *q;

	if (r == NULL)
		return NULL;

	q = initq(100);

	printf("%d ", r->value);
	r->count = 1;
	if (r->value == value)
		ret = r;	/* BUG FIX: the match was never recorded, so
				   the function always returned NULL. */
	enqueue(q, (void *)r);

	while (!isEmpty(q)) {
		temp = (elem_t *)dequeue(q);
		if (temp->left && temp->left->count == 0) {
			printf("%d ", temp->left->value);
			temp->left->count = 1;
			if (ret == NULL && temp->left->value == value)
				ret = temp->left;
			enqueue(q, (void *)(temp->left));
		}
		if (temp->right && temp->right->count == 0) {
			printf("%d ", temp->right->value);
			temp->right->count = 1;
			if (ret == NULL && temp->right->value == value)
				ret = temp->right;
			enqueue(q, (void *)(temp->right));
		}
	}

	free(q);
	return ret;
}
/*
 * msgQCreate - create a VxWorks message queue of nb_msgs messages of at
 * most `length` bytes each.  `flags` selects FIFO vs. priority ordering
 * of pending tasks.  Returns the queue id, or 0 on error with the
 * VxWorks errno set accordingly.
 */
MSG_Q_ID msgQCreate(int nb_msgs, int length, int flags)
{
	static unsigned long msgq_ids;	/* monotonic suffix for registry names */
	wind_msgq_t *queue;
	xnflags_t bflags = 0;
	int i, msg_size;
	char *msgs_mem;
	spl_t s;

	check_NOT_ISR_CALLABLE(return 0);

	/* Argument validation; each failure sets the matching errno. */
	error_check(nb_msgs <= 0, S_msgQLib_INVALID_QUEUE_TYPE, return 0);
	error_check(flags & ~WIND_MSG_Q_OPTION_MASK,
		    S_msgQLib_INVALID_QUEUE_TYPE, return 0);
	error_check(length < 0, S_msgQLib_INVALID_MSG_LENGTH, return 0);

	/* One allocation holds the descriptor followed by nb_msgs
	   message buffers of (header + payload) bytes each. */
	msgs_mem = xnmalloc(sizeof(wind_msgq_t) +
			    nb_msgs * (sizeof(wind_msg_t) + length));
	error_check(msgs_mem == NULL, S_memLib_NOT_ENOUGH_MEMORY, return 0);

	queue = (wind_msgq_t *)msgs_mem;
	msgs_mem += sizeof(wind_msgq_t);

	queue->magic = WIND_MSGQ_MAGIC;
	queue->msg_length = length;
	queue->free_list = NULL;
	initq(&queue->msgq);
	inith(&queue->rlink);
	queue->rqueue = &wind_get_rholder()->msgQq;

	/* init of the synch object : */
	if (flags & MSG_Q_PRIORITY)
		bflags |= XNSYNCH_PRIO;

	xnsynch_init(&queue->synchbase, bflags, NULL);

	/* Carve the trailing storage into free message buffers. */
	msg_size = sizeof(wind_msg_t) + length;
	for (i = 0; i < nb_msgs; ++i, msgs_mem += msg_size)
		free_msg(queue, (wind_msg_t *)msgs_mem);

	/* Publish the queue on the resource holder under the nucleus lock. */
	xnlock_get_irqsave(&nklock, s);
	appendq(queue->rqueue, &queue->rlink);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(queue->name, "mq%lu", msgq_ids++);

	if (xnregistry_enter(queue->name, queue, &queue->handle, &msgq_pnode)) {
		/* Registry full: undo everything via the regular
		   deletion path. */
		wind_errnoset(S_objLib_OBJ_ID_ERROR);
		msgQDelete((MSG_Q_ID)queue);
		return 0;
	}

	return (MSG_Q_ID)queue;
}
/*
 * pSOS skin entry point: reset the global resource queues, boot the
 * nucleus, allocate/start the "psos" time base, initialize region 0,
 * then bring up every object package.  Returns 0 on success, a
 * negative error code otherwise.
 */
int SKIN_INIT(psos)
{
	int err;

	initq(&__psos_global_rholder.smq);
	initq(&__psos_global_rholder.qq);
	initq(&__psos_global_rholder.ptq);
	initq(&__psos_global_rholder.rnq);

	err = xnpod_init();
	if (err != 0)
		return err;

	err = xntbase_alloc("psos", tick_arg * 1000, sync_time ? 0 : XNTBISO,
			    &psos_tbase);
	if (err != 0)
		goto fail;

	xntbase_start(psos_tbase);

	err = psosrn_init(module_param_value(rn0_size_arg));
	if (err != 0) {
		/* The fail label lives inside this branch; it is also
		   reached from the xntbase_alloc failure above. */
	      fail:
		xnpod_shutdown(err);
		xnlogerr("pSOS skin init failed, code %d.\n", err);
		return err;
	}

	psossem_init();
	psosqueue_init();
	psospt_init();
	psosasr_init();
	psostm_init();
	psostask_init(module_param_value(time_slice_arg));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	psos_syscall_init();
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnprintf("starting pSOS+ services.\n");

	return err;
}
/* NOTE: an earlier implementation of printtree() (level spacing driven
 * by pow(2, level) with NULL sentinel enqueues) was commented out here;
 * the dead code has been dropped for readability. */

/*
 * printtree - print a binary tree level by level, roughly centered.
 * Each level halves the horizontal spread (`mid`); NULL children are
 * enqueued as placeholders so columns stay aligned down to depth(t).
 */
void printtree(tree t){
	if(t == NULL)
		return;

	int mid = 64, i;	/* half-width of the printable area */

	/* Leaf root (left == right only when both are NULL):
	   print it on a single dashed line and stop. */
	if(t->left == t->right){
		for(i = 0; i < mid-1; i++)
			printf("-");
		printf("%d", t->n);
		for(i = 0; i < mid; i++)
			printf("-");
		printf("\n");
		return;
	}

	queue q;
	int count = 0, d;
	tree p, old = NULL;
	initq(&q);
	d = depth(t);
	int level = 0;
	enqueue(&q, t);

	while(!isemptyq(&q) && level <= d){
		count = 0;
		p = dequeue(&q);
		/* Each level holds exactly 2^level slots (real or placeholder). */
		while(count < pow(2, level)){
			/* NOTE(review): the else binds to the INNER if, i.e.
			   "if (p == old) { if (empty) break; else p = dequeue; }".
			   This skips re-printing the previously handled node --
			   presumably intended, but worth confirming. */
			if(p == old)
				if(isemptyq(&q))
					break;
				else
					p = dequeue(&q);
			for(i = 0; i < mid-4; i++)
				printf(" ");
			if(p != NULL){
				for(i = 0; i < 4; i++)
					printf("-");
				printf("%d", p->n);
				//if(p->left != p->right){
				enqueue(&q, p->left);
				enqueue(&q, p->right);
				//}
			}
			else{
				/* Placeholder slot: keep the grid aligned and
				   push two placeholders for the next level. */
				printf("-");
				enqueue(&q, NULL);
				enqueue(&q, NULL);
			}
			old = p;
			for(i = 0; i < 4; i++)
				printf("-");
			for(i = 0; i < mid; i++)
				printf(" ");
			count++;
		}
		printf("\n\n\n");
		mid = mid/2;	/* tighten spacing for the next, wider level */
		level++;
	}
}
/*
 * Firmware entry point for an HCS12-class board: bring up the clock,
 * LCD, both serial channels, PWM (used for an RGB LED), RTI, switches
 * and the input queue; connect the ESP8266 Wi-Fi module to the server;
 * then loop dispatching single-character commands from the queue and
 * echoing each result on SCI0.  Never returns.
 */
void main(void)
{
	PLL_init();
	lcd_init();
	SCI0_init(9600);
	SCI1_int_init(9600);	// Channel to talk to ESP8266
	motor0_init();		// These functions actually control PWM outputs
	motor1_init();		// We use them to run the RGB LED.
	motor2_init();
	RTI_init();
	SW_enable();
	initq();
	DDRH = 0;		// PORTH is an input.
	result = 0;
	status = 'b';		// Populate binary search tree:
	set_lcd_addr(0);
	send_at_command_sci1("ATE0");	// change to ATE1 for debug
	status = 'i';			// Establish connection to server.
	send_at_command_sci1("AT+CWMODE=1");	// Set ESP to station mode
	send_at_command_sci1("AT+CIPMODE=0");	// Set ESP to normal transmission mode
	send_at_command_sci1("AT+CIPMUX=0");	// Set ESP to single-connection mode
	send_at_command_sci1("AT+CWJAP=\"Freynet\",\"\"");	// Connect to network
	send_at_command_sci1("AT+CIPSTART=\"TCP\",\"fpf3.net\",12345");	// connect to server
	while(1){
		command = '\0';
		// Busy-wait for a queued input character (the producer is
		// not visible here -- presumably an ISR fills the queue).
		while(qempty());
		command = getq();
		switch (command) {
		case 'n':
			status = 'w';
			result = new_sequence();
			ms_delay(500);	// If we finish too quickly, we open a connection the ESP thinks is already open, and it breaks.
			send_at_command_sci1("AT+CIPSTART=\"TCP\",\"fpf3.net\",12345");	// connect to server
			break;
		}
		outchar0(result);
	}
}
/*
 * VxWorks skin entry point: reset the global resource holder, boot the
 * nucleus, start the system clock, then bring up each object package.
 * Returns 0 on success, a negative error code otherwise.
 */
int SKIN_INIT(vxworks)
{
	int err;

	initq(&__wind_global_rholder.wdq);
	initq(&__wind_global_rholder.msgQq);
	initq(&__wind_global_rholder.semq);

	/* The following fields are unused in the global holder;
	   still, we initialize them not to leave such data in an
	   invalid state. */
	xnsynch_init(&__wind_global_rholder.wdsynch, XNSYNCH_FIFO, NULL);
	initq(&__wind_global_rholder.wdpending);
	__wind_global_rholder.wdcount = 0;

	err = xnpod_init();
	if (err != 0)
		goto fail_core;

	err = wind_sysclk_init(tick_arg * 1000);
	if (err != 0) {
		xnpod_shutdown(err);
		/* fail_core lives inside this branch so the xnpod_init
		   failure path skips the shutdown call above. */
	      fail_core:
		xnlogerr("VxWorks skin init failed, code %d.\n", err);
		return err;
	}

	wind_wd_init();
	wind_task_hooks_init();
	wind_sem_init();
	wind_msgq_init();
	wind_task_init();

#ifdef CONFIG_XENO_OPT_PERVASIVE
	wind_syscall_init();
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnprintf("starting VxWorks services.\n");

	return 0;
}
/*
 * Bring up the select support: reset the global selector list and
 * allocate the APC used to destroy dropped selectors.  Returns 0 on
 * success or the negative code from rthal_apc_alloc().
 */
int xnselect_mount(void)
{
	initq(&xnselectors);

	xnselect_apc = rthal_apc_alloc("xnselectors_destroy",
				       xnselector_destroy_loop, NULL);

	return xnselect_apc < 0 ? xnselect_apc : 0;
}
/*
 * Initialize a freshly created POSIX shared memory object and register
 * it on the global pse51_shmq list.
 *
 * @param shm the shared memory descriptor to set up.
 */
static void pse51_shm_init(pse51_shm_t * shm)
{
	/* Nothing mapped yet; addr/size are filled in at map time. */
	shm->addr = NULL;
	shm->size = 0;
	/* Binary semaphore serializing access to the mappings list. */
	sema_init(&shm->maplock, 1);
	initq(&shm->mappings);
	inith(&shm->link);
	/* NOTE(review): the global list is appended to without visible
	   locking -- presumably the caller serializes; confirm. */
	appendq(&pse51_shmq, &shm->link);
}
// scheduler thread is the single consumer of tls_reuseq // producers are worker threads or scheduler thread itself. // We have a fixed number of events that are populated on first call. // If return NULL, caller should busy wait, go do something else or sleep. qitem* queue_get_item(void) { // qitem *res; if (tls_reuseq == NULL) { tls_reuseq = calloc(1,sizeof(intq)); initq(tls_reuseq); populate(tls_reuseq); } return qpop(tls_reuseq); }
/**
 * Initialize a selector structure.
 *
 * Sets up the synchronization object, clears the expected/pending fd
 * sets for every select type, and empties the bindings list.
 *
 * @param selector The selector structure to be initialized.
 *
 * @retval 0
 */
int xnselector_init(struct xnselector *selector)
{
	unsigned type = 0;

	xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);

	while (type < XNSELECT_MAX_TYPES) {
		__FD_ZERO__(&selector->fds[type].expected);
		__FD_ZERO__(&selector->fds[type].pending);
		type++;
	}

	initq(&selector->bindings);

	return 0;
}
/*
 * Shadow event callback for the VxWorks skin: allocates a per-process
 * resource holder on client attach, and flushes/releases everything the
 * process still owns on detach.  Returns the embedded ppd on attach,
 * NULL on detach, or an ERR_PTR code otherwise.
 */
static void *__wind_shadow_eventcb(int event, void *data)
{
	struct wind_resource_holder *rh;

	switch (event) {
	case XNSHADOW_CLIENT_ATTACH:
		rh = (struct wind_resource_holder *)
		    xnarch_alloc_host_mem(sizeof(*rh));
		if (!rh)
			return ERR_PTR(-ENOMEM);

		initq(&rh->wdq);
		/* A single server thread pends on this. */
		xnsynch_init(&rh->wdsynch, XNSYNCH_FIFO, NULL);
		initq(&rh->wdpending);
		rh->wdcount = 0;
		initq(&rh->msgQq);
		initq(&rh->semq);

		return &rh->ppd;

	case XNSHADOW_CLIENT_DETACH:
		rh = ppd2rholder((xnshadow_ppd_t *) data);
		/* Release every object the dying process still owns. */
		wind_wd_flush_rq(&rh->wdq);
		xnsynch_destroy(&rh->wdsynch);
		/* No need to reschedule: all our threads have been zapped. */
		wind_msgq_flush_rq(&rh->msgQq);
		wind_sem_flush_rq(&rh->semq);
		xnarch_free_host_mem(rh, sizeof(*rh));

		return NULL;
	}

	return ERR_PTR(-EINVAL);
}
// External API. For a task queue. // Actually uses multiple internal queues. // The main queue is X producers to 1 consumer. Scheduler threads to a worker. // Recycle queues are between all worker threads and a scheduler. // All based on initq/qpush/qpop. // Fixed size and does no allocations after first calls. queue *queue_create() { queue *ret; ret = (queue *) calloc(1,sizeof(struct queue_t)); if(ret == NULL) return NULL; if (SEM_INIT(ret->sem)) return NULL; initq(&ret->q); return ret; }
/*
 * Allocate a time base.  An aperiodic period maps onto the master
 * (nucleus) time base; otherwise a slave base is allocated, initialized
 * for the given period (in ns), registered on the global time base
 * queue and returned through *basep.
 *
 * Returns 0 on success, -EINVAL on unsupported flags, -ENOMEM when the
 * slave descriptor cannot be allocated.
 */
int xntbase_alloc(const char *name, u_long period, u_long flags,
		  xntbase_t **basep)
{
	xntslave_t *slave;
	xntbase_t *base;
	spl_t s;

	if (flags & ~XNTBISO)
		return -EINVAL;

	/* Aperiodic tick: hand out the master time base instead. */
	if (period == XN_APERIODIC_TICK) {
		*basep = &nktbase;
		xnarch_declare_tbase(&nktbase);
		return 0;
	}

	slave = (xntslave_t *)xnarch_alloc_host_mem(sizeof(*slave));
	if (!slave)
		return -ENOMEM;

	base = &slave->base;
	base->tickvalue = period;
	base->ticks2sec = 1000000000UL / period;	/* period is in ns */
	base->wallclock_offset = 0;
	base->jiffies = 0;
	base->hook = NULL;
	base->ops = &nktimer_ops_periodic;
	base->name = name;
	inith(&base->link);
	xntslave_init(slave);

	/* Set initial status:
	   Not running, no time set, unlocked, isolated if requested. */
	base->status = flags;
	*basep = base;
#ifdef CONFIG_XENO_OPT_STATS
	initq(&base->timerq);
	/* (guard comment fixed: it previously named
	   CONFIG_XENO_OPT_TIMING_PERIODIC, which is not the guard above) */
#endif /* CONFIG_XENO_OPT_STATS */
	xntbase_declare_proc(base);

	xnlock_get_irqsave(&nklock, s);
	appendq(&nktimebaseq, &base->link);
	xnlock_put_irqrestore(&nklock, s);

	xnarch_declare_tbase(base);

	return 0;
}
// traverse a directory tree applying a function when a file is found void process(char *root) { int numOfFiles = 0; que_t nameq; char dname[MAXLENGTH]; char cname[MAXLENGTH]; char prefix[MAXLENGTH]; struct dirent *dp; DIR *dirp; initq(&nameq); enqueue(root,&nameq); while (true != queue_empty(nameq)) { peek_front(dname,nameq); dequeue(&nameq); dirp = opendir(dname); if (dirp != NULL) { // it is a directory printf("directory : %s\n",dname); strncpy(prefix, dname, MAXLENGTH); strncat(prefix,"/", MAXLENGTH); for (dp = readdir(dirp); NULL != dp; dp = readdir(dirp)) { if ((strcmp(dp->d_name,"..") != 0) && (strcmp(dp->d_name,".") != 0)) { // prevent from infinite loop strncpy(cname, prefix, MAXLENGTH); // concatenate the prefix strncat(cname, dp->d_name, MAXLENGTH); enqueue(cname,&nameq); } } closedir (dirp); } else { // test if it is a regular file and not a device or link -- TO-DO // if this is a regular file, then process it -- TO-DO printf(" processing file: %s\n", dname); numOfFiles++; } } // while printf(" a total of %d files were counted\n",numOfFiles); }
/*
 * Print the keys of a binary tree in level (breadth-first) order,
 * tab-separated, using an auxiliary queue.
 */
void levelordertraverse(tree t)
{
	queue q;
	node *cur;

	initq(&q);

	if (t == NULL)
		return;

	enqueue(&q, t);
	while (!isemptyq(&q)) {
		cur = dequeue(&q);
		printf("%d\t", cur->n);
		if (cur->left != NULL)
			enqueue(&q, cur->left);
		if (cur->right != NULL)
			enqueue(&q, cur->right);
	}
}
void initmlq(struct xnsched_mlq *q, int loprio, int hiprio) { int prio; q->elems = 0; q->loprio = loprio; q->hiprio = hiprio; q->himap = 0; memset(&q->lomap, 0, sizeof(q->lomap)); for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++) initq(&q->queue[prio]); XENO_ASSERT(QUEUES, hiprio - loprio + 1 < XNSCHED_MLQ_LEVELS, xnpod_fatal("priority range [%d..%d] is beyond multi-level " "queue indexing capabilities", loprio, hiprio)); }
int main(int argc, char *argv[]) { //Test queue implementation. q_t *q = initq(10); char *str; for (int i=0; i<15; i++) { str = malloc(4 * sizeof(char)); sprintf(str," %d ",i); enqueue(q,(void *)str); } for (int i=0; i<15; i++) { str = (char *)dequeue(q); if(str) printf("%s",str); } //TBD test BFS here after inserting elements in a temp tree }
/*
 * Build a uniform triangular mesh of the unit square [0,1]x[0,1] and
 * return the vertex array through *Zp and the element array through
 * *Np.  Grid spacing defaults to 1/8 and can be overridden with the
 * -n (cells per side) or -h (step) command-line options.
 */
void estiva_rectmesh(xyc **Zp, nde **Np)
{
	static xyc *Z;
	static nde *N;
	static que *q;
	double x, y, h;
	long i, j;

	/* NOTE(review): q is static, hence NULL on the first call; this
	   assumes initq() (re)allocates or resets the queue through its
	   argument -- confirm initq's contract, otherwise this passes
	   a null pointer. */
	initq(q);

	/* Default spacing 1/8; -n/-h override it. */
	h = 0.125;
	if ( defop("-n") ) h = 1.0/atof(getop("-n"));
	if ( defop("-h") ) h = atof(getop("-h"));

	/* Walk the grid; the +h/2.0 in the bounds absorbs floating-point
	   rounding.  Boundary points are labeled by side; the remaining
	   boundary points (the four corners) get "zero"; interior points
	   are unlabeled. */
	for ( i = 0, x = 0.0; i < 1.0/h+h/2.0; i++, x += h)
		for ( j = 0, y = 0.0; j < 1.0/h+h/2.0; j++, y += h) {
			if ( x == 0.0 || eq(x,1.0) || y == 0.0 || eq(y,1.0) ) {
				if ( eq(y,1.0) && x != 0.0 && !eq(x,1.0) )
					pushxyc(q,x,y,"north");
				else if ( y == 0.0 && x != 0.0 && !eq(x,1.0) )
					pushxyc(q,x,y,"south");
				else if ( x == 0.0 && y != 0.0 && !eq(y,1.0) )
					pushxyc(q,x,y,"west");
				else if ( eq(x,1.0) && y != 0.0 && !eq(y,1.0) )
					pushxyc(q,x,y,"east");
				else
					pushxyc(q,x,y,"zero");
			} else
				pushxyc(q,x,y,NULL);
		}

	/* Two extra interior points near opposite corners. */
	pushxyc(q,h/2.0,h/2.0,NULL);
	pushxyc(q,1.0-h/2.0,1.0-h/2.0,NULL);

	genmesh(q,Z,N);
	p2(Z,N);
	*Zp = Z;
	*Np = N;
}
/*
 * Client/server demo driver: set up the run queue, create the
 * per-slot semaphores for 100 producer/consumer slots, then spawn one
 * server and two clients and hand control to the scheduler.
 */
int main()
{
	int i;
	int port = 50;

	initq(&RunQ);

	for (i = 0; i < 100; i++) {
		p[i].mutex = CreateSem(1);
		p[i].producer = CreateSem(10);
		p[i].consumer = CreateSem(0);
	}

	printf("Starting first server=server1 at port %d\n", port);
	start_thread(server1);
	start_thread(client1);
	start_thread(client2);
	run();
}
/*
 * Native skin entry point: reset the global resource holder queues,
 * boot the nucleus, allocate and start the "native" time base, then
 * bring up every API package in dependency order.  On any failure the
 * packages already initialized are torn down in reverse order through
 * the cleanup label ladder below.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int SKIN_INIT(native)
{
	int err;

	/* Queues anchoring every kernel-created native object. */
	initq(&__native_global_rholder.alarmq);
	initq(&__native_global_rholder.condq);
	initq(&__native_global_rholder.eventq);
	initq(&__native_global_rholder.heapq);
	initq(&__native_global_rholder.intrq);
	initq(&__native_global_rholder.mutexq);
	initq(&__native_global_rholder.pipeq);
	initq(&__native_global_rholder.queueq);
	initq(&__native_global_rholder.semq);
	initq(&__native_global_rholder.ioregionq);
	initq(&__native_global_rholder.bufferq);

	err = xnpod_init();
	if (err)
		goto fail;

	/* NOTE(review): this failure path skips xnpod_shutdown() even
	   though the pod is up at this point -- verify intent. */
	err = xntbase_alloc("native", tick_arg * 1000, 0, &__native_tbase);
	if (err)
		goto fail;

	xntbase_start(__native_tbase);

	err = __native_misc_pkg_init();
	if (err)
		goto cleanup_pod;

	err = __native_task_pkg_init();
	if (err)
		goto cleanup_misc;

	err = __native_sem_pkg_init();
	if (err)
		goto cleanup_task;

	err = __native_event_pkg_init();
	if (err)
		goto cleanup_sem;

	err = __native_mutex_pkg_init();
	if (err)
		goto cleanup_event;

	err = __native_cond_pkg_init();
	if (err)
		goto cleanup_mutex;

	err = __native_pipe_pkg_init();
	if (err)
		goto cleanup_cond;

	err = __native_queue_pkg_init();
	if (err)
		goto cleanup_pipe;

	err = __native_heap_pkg_init();
	if (err)
		goto cleanup_queue;

	err = __native_alarm_pkg_init();
	if (err)
		goto cleanup_heap;

	err = __native_intr_pkg_init();
	if (err)
		goto cleanup_alarm;

	err = __native_syscall_init();
	if (err)
		goto cleanup_intr;

	xnprintf("starting native API services.\n");

	return 0;		/* SUCCESS. */

	/* Unwind ladder: each label undoes the package initialized just
	   before the failing step, then falls through to the next. */
      cleanup_intr:
	__native_intr_pkg_cleanup();

      cleanup_alarm:
	__native_alarm_pkg_cleanup();

      cleanup_heap:
	__native_heap_pkg_cleanup();

      cleanup_queue:
	__native_queue_pkg_cleanup();

      cleanup_pipe:
	__native_pipe_pkg_cleanup();

      cleanup_cond:
	__native_cond_pkg_cleanup();

      cleanup_mutex:
	__native_mutex_pkg_cleanup();

      cleanup_event:
	__native_event_pkg_cleanup();

      cleanup_sem:
	__native_sem_pkg_cleanup();

      cleanup_task:
	__native_task_pkg_cleanup();

      cleanup_misc:
	__native_misc_pkg_cleanup();

      cleanup_pod:
	xntbase_free(__native_tbase);
	xnpod_shutdown(err);

      fail:
	xnlogerr("native skin init failed, code %d.\n", err);

	return err;
}
/**
 * Initialize a @a struct @a xnselect structure.
 *
 * This service must be called to initialize a @a struct @a xnselect structure
 * before it is bound to a selector by the means of xnselect_bind().
 *
 * @param select_block pointer to the xnselect structure to be initialized
 */
void xnselect_init(struct xnselect *select_block)
{
	/* Start with an empty list of selector bindings. */
	initq(&select_block->bindings);
}
/*
 * One-time initialization of the POSIX skin semaphore package: reset
 * the kernel-wide semaphore queue.
 */
void pse51_sem_pkg_init(void)
{
	initq(&pse51_global_kqueues.semq);
}
int vrtxmx_init(void) { initq(&vrtx_mx_q); vrtx_mx_idmap = xnmap_create(VRTX_MAX_MUTEXES, 0, 0); return vrtx_mx_idmap ? 0 : -ENOMEM; }
/*
 * One-time initialization of the POSIX skin shared memory package:
 * reset the global list of shared memory objects.  Always returns 0.
 */
int pse51_shm_pkg_init(void)
{
	initq(&pse51_shmq);

	return 0;
}