static VOID setup_context(proc_member * member)
{
    pcb * newProc = kernel_malloc(sizeof(pcb));
    BYTE * stack = kernel_malloc(member->stack_size);

    printf("process PID %d '%s', priority %d, at address 0x%x\n",
           member->pid, pid_to_string(member->pid),
           member->priority, (int) member->function);

    if (newProc == NULL || stack == NULL) {
        printf("Kernel panic: out of memory for the processes\n");
        return;
    }

    newProc->pid = member->pid;
    newProc->priority = member->priority;
    init_rtx_queue(&newProc->mailBox);
    newProc->procType = member->procType;
    newProc->state = READY;

    /* Place the entry point at the top of the new stack. */
    *(UINT32*) ((UINT32) stack + member->stack_size - 4) = (UINT32) member->function;

    /* Initial status word, stored just below the entry point;
     * interrupt-priority processes get a different value. */
    if (member->priority == I_PRIORITY)
        *(UINT32*) ((UINT32) stack + member->stack_size - 8) = 0x40000700;
    else
        *(UINT32*) ((UINT32) stack + member->stack_size - 8) = 0x40000000;

    /* Leave room for the saved register context (68 bytes). */
    newProc->stack_pointer = ((UINT32) stack + member->stack_size - 68);

    push(&state_q, newProc);
}
void *
kernel_realloc(void *cp, size_t nbytes)
{
    size_t cur_space;       /* Space in the current bucket */
    size_t smaller_space;   /* Space in the next smaller bucket */
    union overhead *op;
    char *res;

    if (cp == NULL)
        return (kernel_malloc(nbytes));
    op = find_overhead(cp);
    if (op == NULL)
        return (NULL);
    cur_space = (1 << (op->ov_index + 3)) - sizeof(*op);

    /* avoid the copy if same size block */
    /*
     * XXX-BD: Arguably we should be tracking the actual allocation,
     * not just the bucket size, so that we can do a full malloc+memcpy
     * when the caller has restricted the length of the pointer passed
     * to realloc() but is growing the buffer within the current bucket.
     *
     * As it is, this code contains a leak where realloc recovers access
     * to the contents in foo:
     *     char *foo = malloc(10);
     *     strcpy(foo, "abcdefghi");
     *     cheri_setbounds(foo, 5);
     *     foo = realloc(foo, 10);
     */
    smaller_space = (1 << (op->ov_index + 2)) - sizeof(*op);
    if (nbytes <= cur_space && nbytes > smaller_space)
        return (cheri_andperm(cheri_setbounds(op + 1, nbytes),
            cheri_getperm(cp)));

    if ((res = kernel_malloc(nbytes)) == NULL)
        return (NULL);
    /*
     * Only copy data the caller had access to even if this is less
     * than the size of the original allocation. This risks surprise
     * for some programmers, but to do otherwise risks information leaks.
     */
    memcpy(res, cp, (nbytes <= cheri_getlen(cp)) ? nbytes : cheri_getlen(cp));
    res = cheri_andperm(res, cheri_getperm(cp));
    kernel_free(cp);
    return (res);
}
void dnp_mailbox_init(uint32_t ndev)
{
  dnp_mailbox_nentries = ndev;
  dnp_mailboxes = kernel_malloc(2 * ndev * sizeof(dnp_mailbox_t *), true);
  return;
}
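The table allocated above holds two slots per device: the inbound mailbox at index 2*i and the outbound one at 2*i+1, which is exactly how dnp_mailbox_allocate below fills it. A minimal sketch making that layout explicit; the two accessor names are hypothetical, not part of the driver:

/* Hypothetical helpers; the 2*i / 2*i+1 indexing mirrors
 * dnp_mailbox_allocate() below. */
static inline dnp_mailbox_t * dnp_mailbox_in (uint32_t channel_idx)
{
  return dnp_mailboxes[2 * channel_idx];
}

static inline dnp_mailbox_t * dnp_mailbox_out (uint32_t channel_idx)
{
  return dnp_mailboxes[2 * channel_idx + 1];
}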
/** mailbox_allocate
 * Description: allocates space for the mailbox in both directions
 */
status_t dnp_mailbox_allocate(uint32_t channel_idx)
{
  dnp_mailbox_t *mb_in, *mb_out;
  status_t ret_val;

  watch (status_t)
  {
    mb_in = (dnp_mailbox_t *)kernel_malloc(sizeof(dnp_mailbox_t), 1);
    /* kernel_malloc may fail; bail out rather than dereference NULL. */
    ensure (mb_in != NULL, DNA_OUT_OF_MEM);
    mb_in->status = 0;
    mb_in->nr = mb_in->nw = 0;

    ret_val = semaphore_create("mailbox_in", 0, &mb_in->sem);
    check (sem_error, ret_val == DNA_OK, DNA_ERROR);

    mb_out = (dnp_mailbox_t *)kernel_malloc(sizeof(dnp_mailbox_t), 1);
    ensure (mb_out != NULL, DNA_OUT_OF_MEM);
    mb_out->nr = mb_out->nw = 0;
    mb_out->status = 0;

    ret_val = semaphore_create("mailbox_out", 0, &mb_out->sem);
    check (sem_error, ret_val == DNA_OK, DNA_ERROR);

    /* Drain any pending tokens from the semaphores. */
    while (semaphore_acquire(mb_in->sem, 1, DNA_RELATIVE_TIMEOUT, 0) == DNA_OK);
    while (semaphore_acquire(mb_out->sem, 1, DNA_RELATIVE_TIMEOUT, 0) == DNA_OK);

    dnp_mailboxes[2*channel_idx] = mb_in;
    dnp_mailboxes[2*channel_idx+1] = mb_out;

    return DNA_OK;
  }

  rescue (sem_error)
  {
    EMSG("Failed: no sem initialized");
    leave;
  }
}
ER_UINT
acre_pdq(const T_CPDQ *pk_cpdq)
{
    PDQCB   *p_pdqcb;
    PDQINIB *p_pdqinib;
    ATR     pdqatr;
    PDQMB   *p_pdqmb;
    ER      ercd;

    LOG_ACRE_PDQ_ENTER(pk_cpdq);
    CHECK_TSKCTX_UNL();
    CHECK_RSATR(pk_cpdq->pdqatr, TA_TPRI);
    CHECK_DPRI(pk_cpdq->maxdpri);
    pdqatr = pk_cpdq->pdqatr;
    p_pdqmb = pk_cpdq->pdqmb;

    t_lock_cpu();
    if (queue_empty(&free_pdqcb)) {
        ercd = E_NOID;
    }
    else {
        if (pk_cpdq->pdqcnt != 0 && p_pdqmb == NULL) {
            p_pdqmb = kernel_malloc(sizeof(PDQMB) * pk_cpdq->pdqcnt);
            pdqatr |= TA_MBALLOC;
        }
        if (pk_cpdq->pdqcnt != 0 && p_pdqmb == NULL) {
            ercd = E_NOMEM;
        }
        else {
            p_pdqcb = ((PDQCB *) queue_delete_next(&free_pdqcb));
            p_pdqinib = (PDQINIB *)(p_pdqcb->p_pdqinib);
            p_pdqinib->pdqatr = pdqatr;
            p_pdqinib->pdqcnt = pk_cpdq->pdqcnt;
            p_pdqinib->maxdpri = pk_cpdq->maxdpri;
            p_pdqinib->p_pdqmb = p_pdqmb;

            queue_initialize(&(p_pdqcb->swait_queue));
            queue_initialize(&(p_pdqcb->rwait_queue));
            p_pdqcb->count = 0U;
            p_pdqcb->p_head = NULL;
            p_pdqcb->unused = 0U;
            p_pdqcb->p_freelist = NULL;
            ercd = PDQID(p_pdqcb);
        }
    }
    t_unlock_cpu();

  error_exit:
    LOG_ACRE_PDQ_LEAVE(ercd);
    return(ercd);
}
status_t block_device_init_driver (void)
{
  int32_t i;
  char alpha_num[8], semaphore_name_buffer[64],
    * semaphore_prefix = "block_device_";
  char isr_semaphore_name[64];

  watch (status_t)
  {
    block_device_controls = kernel_malloc
      (sizeof (block_device_control_t) * SOCLIB_BLOCK_DEVICES_NDEV, true);
    ensure (block_device_controls != NULL, DNA_OUT_OF_MEM);

    for (i = 0; i < SOCLIB_BLOCK_DEVICES_NDEV; i++)
    {
      dna_itoa (i, alpha_num);
      dna_strcpy (semaphore_name_buffer, semaphore_prefix);
      dna_strcat (semaphore_name_buffer, alpha_num);
      dna_strcat (semaphore_name_buffer, "_sem");

      semaphore_create (semaphore_name_buffer, 1,
          & block_device_controls[i] . semaphore_id);

      block_device_controls[i] . should_enable_irq =
        (bool) SOCLIB_BLOCK_DEVICES[i] . should_enable_irq;
      block_device_controls[i] . port =
        (block_device_register_map_t) SOCLIB_BLOCK_DEVICES[i] . base_address;

      cpu_read (UINT32, & (block_device_controls[i] . port -> BLOCK_DEVICE_SIZE),
          block_device_controls[i] . block_count);
      cpu_read (UINT32, & (block_device_controls[i] . port -> BLOCK_DEVICE_BLOCK_SIZE),
          block_device_controls[i] . block_size);

      if (block_device_controls[i] . should_enable_irq)
      {
        block_device_controls[i] . irq = SOCLIB_BLOCK_DEVICES[i] . irq;
        interrupt_attach (0, SOCLIB_BLOCK_DEVICES[i] . irq, 0x0,
            block_device_isr, false);

        dna_strcpy (isr_semaphore_name, semaphore_name_buffer);
        dna_strcat (isr_semaphore_name, "_isr");
        semaphore_create (isr_semaphore_name, 0,
            & block_device_controls[i] . isr_semaphore_id);
      }
    }

    return DNA_OK;
  }
}
void *
kernel_calloc(size_t num, size_t size)
{
    void *ret;

    if (size != 0 && (num * size) / size != num) {
        /* size_t overflow. */
        return (NULL);
    }

    if ((ret = kernel_malloc(num * size)) != NULL)
        memset(ret, 0, num * size);

    return (ret);
}
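A brief sketch of why the overflow guard above matters; the caller below is hypothetical. When num * size wraps around SIZE_MAX, the division test no longer recovers num, so kernel_calloc fails instead of returning an undersized zeroed buffer:

#include <stdint.h>

void calloc_overflow_example (void)
{
    /* (SIZE_MAX / 2 + 1) * 4 wraps to 0, so
     * (num * size) / size != num and NULL is returned. */
    void * p = kernel_calloc (SIZE_MAX / 2 + 1, 4);
    /* p == NULL here; no undersized allocation escapes. */
}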
ER_UINT
acre_dtq(const T_CDTQ *pk_cdtq)
{
    DTQCB   *p_dtqcb;
    DTQINIB *p_dtqinib;
    ATR     dtqatr;
    DTQMB   *p_dtqmb;
    ER      ercd;

    LOG_ACRE_DTQ_ENTER(pk_cdtq);
    CHECK_TSKCTX_UNL();
    CHECK_RSATR(pk_cdtq->dtqatr, TA_TPRI);
    dtqatr = pk_cdtq->dtqatr;
    p_dtqmb = pk_cdtq->dtqmb;

    t_lock_cpu();
    if (queue_empty(&free_dtqcb)) {
        ercd = E_NOID;
    }
    else {
        if (pk_cdtq->dtqcnt != 0 && p_dtqmb == NULL) {
            p_dtqmb = kernel_malloc(sizeof(DTQMB) * pk_cdtq->dtqcnt);
            dtqatr |= TA_MBALLOC;
        }
        if (pk_cdtq->dtqcnt != 0 && p_dtqmb == NULL) {
            ercd = E_NOMEM;
        }
        else {
            p_dtqcb = ((DTQCB *) queue_delete_next(&free_dtqcb));
            p_dtqinib = (DTQINIB *)(p_dtqcb->p_dtqinib);
            p_dtqinib->dtqatr = dtqatr;
            p_dtqinib->dtqcnt = pk_cdtq->dtqcnt;
            p_dtqinib->p_dtqmb = p_dtqmb;

            queue_initialize(&(p_dtqcb->swait_queue));
            queue_initialize(&(p_dtqcb->rwait_queue));
            p_dtqcb->count = 0U;
            p_dtqcb->head = 0U;
            p_dtqcb->tail = 0U;
            ercd = DTQID(p_dtqcb);
        }
    }
    t_unlock_cpu();

  error_exit:
    LOG_ACRE_DTQ_LEAVE(ercd);
    return(ercd);
}
status_t dnp_rdma_control(void * handler, int32_t function,
    va_list arguments, int32_t * p_ret)
{
  dnp_rdma_file_t *file = (dnp_rdma_file_t *)handler;
  int32_t virt_channel = va_arg(arguments, int32_t);
  int32_t channel_idx = -1;

  DMSG("control()\n");

  switch (function)
  {
    case DNP_CONNECT:
      /* Validate the virtual channel before using it as an index;
       * the original code indexed the table first. */
      if (virt_channel < 0 || virt_channel >= DNP_CHANNELS_NVIRT)
      {
        EMSG("Error channel id unknown (%d/%d)\n", virt_channel, channel_idx);
        return DNA_ERROR;
      }

      channel_idx = dnp_channels_virt_to_dev[virt_channel];
      if (channel_idx == -1)
      {
        EMSG("Error channel id unknown (%d/%d)\n", virt_channel, channel_idx);
        return DNA_ERROR;
      }

      DMSG("Connecting to channel (%d/%d)\n", virt_channel, channel_idx);

      file->channel = &DNP_CHANNELS[channel_idx];
      rdma_engine_open(file->channel->id);

      file->lbuffer = kernel_malloc
        (file->channel->eager_rdv_threshold * sizeof(uint32_t), false);
      file->lbuf_pos = 0;
      file->lbuf_size = 0;
      file->status = CHANNEL_READY;

      *p_ret = 0;
      break;

    default:
      return DNA_ERROR;
  }

  return DNA_OK;
}
status_t interrupt_attach (int32_t cpuid, interrupt_id_t id, int32_t mode,
    interrupt_handler_t handler, bool bypass_demux)

/*
 * ARGUMENTS
 * * cpuid : the ID of the target processor
 * * id : the ID of the interrupt to attach
 * * mode : the mode of the attach
 * * handler : handler of the interrupt
 * * bypass_demux : the handler has to be installed directly
 *
 * RESULT
 * * DNA_BAD_ARGUMENT: one of the arguments is not correct
 * * DNA_OUT_OF_MEM: cannot allocate the necessary memory to create the ISR
 * * DNA_ERROR: there is more than one ISR and bypass_demux is set
 * * DNA_OK: the operation succeeded
 *
 * SOURCE
 */

{
  isr_t isr = NULL;
  queue_t * queue = NULL;
  interrupt_status_t it_status = 0;

  watch (status_t)
  {
    ensure (id < cpu_trap_count (), DNA_BAD_ARGUMENT);
    ensure (cpuid < cpu_mp_count (), DNA_BAD_ARGUMENT);

    /*
     * Create the new ISR
     */

    isr = kernel_malloc (sizeof (struct _isr), true);
    ensure (isr != NULL, DNA_OUT_OF_MEM);

    isr -> handler = handler;

    /*
     * Add the new ISR to the appropriate queue
     */

    it_status = cpu_trap_mask_and_backup ();
    queue = & cpu_pool . cpu[cpuid] . isr[id];

    lock_acquire (& queue -> lock);
    queue_add (queue, isr);

    check (not_alone, ! bypass_demux
        || (bypass_demux && queue -> status == 1), DNA_ERROR);

    if (queue -> status == 1)
    {
      if (bypass_demux)
      {
        cpu_trap_attach_isr (cpuid, id, mode, handler);
      }
      else
      {
        cpu_trap_attach_isr (cpuid, id, mode, interrupt_demultiplexer);
      }

      if (cpuid == cpu_mp_id ())
      {
        cpu_trap_enable (id);
      }
      else
      {
        lock_acquire (& cpu_pool . cpu[cpuid] . ipi_lock);
        cpu_mp_send_ipi (cpuid, DNA_IPI_TRAP_ENABLE, (void *) id);
      }
    }

    lock_release (& queue -> lock);
    cpu_trap_restore (it_status);

    return DNA_OK;
  }

  rescue (not_alone)
  {
    queue_extract (queue, isr);
    lock_release (& queue -> lock);
    cpu_trap_restore (it_status);
    kernel_free (isr);
    leave;
  }
}
ER_UINT
acre_tsk(const T_CTSK *pk_ctsk)
{
    ID              domid;
    const DOMINIB   *p_dominib;
    TCB             *p_tcb;
    TINIB           *p_tinib;
    ATR             tskatr;
    SIZE            sstksz, ustksz;
    void            *sstk, *ustk;
    ACPTN           acptn;
    ER              ercd;

    LOG_ACRE_TSK_ENTER(pk_ctsk);
    CHECK_TSKCTX_UNL();
    CHECK_MACV_READ(pk_ctsk, T_CTSK);
    CHECK_RSATR(pk_ctsk->tskatr, TA_ACT|TARGET_TSKATR|TA_DOMMASK);
    domid = get_atrdomid(pk_ctsk->tskatr);
    CHECK_ATRDOMID_ACTIVE(domid);
    CHECK_ALIGN_FUNC(pk_ctsk->task);
    CHECK_NONNULL_FUNC(pk_ctsk->task);
    CHECK_TPRI(pk_ctsk->itskpri);

    p_dominib = (domid == TDOM_SELF) ? p_runtsk->p_tinib->p_dominib
                : (domid == TDOM_KERNEL) ? &dominib_kernel : get_dominib(domid);

    if (p_dominib == &dominib_kernel) {
        /*
         * For a system task
         */
        ustksz = 0U;
        ustk = NULL;
        CHECK_PAR(pk_ctsk->sstk == NULL);
        CHECK_PAR(pk_ctsk->stksz > 0U);
        sstksz = pk_ctsk->stksz;
        sstk = pk_ctsk->stk;
        if (sstk != NULL) {
            CHECK_PAR(pk_ctsk->sstksz == 0U);
        }
        else {
            sstksz += pk_ctsk->sstksz;
        }
    }
    else {
        /*
         * For a user task
         */
        ustksz = pk_ctsk->stksz;
        ustk = pk_ctsk->stk;
        CHECK_PAR(ustksz >= TARGET_MIN_USTKSZ);
        CHECK_NOSPT(ustk != NULL);
        CHECK_TARGET_USTACK(ustksz, ustk, p_dominib);
        sstksz = pk_ctsk->sstksz;
        sstk = pk_ctsk->sstk;
    }
    CHECK_PAR(sstksz >= TARGET_MIN_SSTKSZ);
    if (sstk != NULL) {
        CHECK_ALIGN_STKSZ(sstksz);
        CHECK_ALIGN_STACK(sstk);
    }
    CHECK_ACPTN(sysstat_acvct.acptn3);
    tskatr = pk_ctsk->tskatr;

    t_lock_cpu();
    if (queue_empty(&free_tcb)) {
        ercd = E_NOID;
    }
    else {
        if (sstk == NULL) {
            sstk = kernel_malloc(ROUND_STK_T(sstksz));
            tskatr |= TA_MEMALLOC;
        }
        if (sstk == NULL) {
            ercd = E_NOMEM;
        }
        else {
            p_tcb = ((TCB *) queue_delete_next(&free_tcb));
            p_tinib = (TINIB *)(p_tcb->p_tinib);
            p_tinib->p_dominib = p_dominib;
            p_tinib->tskatr = tskatr;
            p_tinib->exinf = pk_ctsk->exinf;
            p_tinib->task = pk_ctsk->task;
            p_tinib->ipriority = INT_PRIORITY(pk_ctsk->itskpri);
#ifdef USE_TSKINICTXB
            init_tskinictxb(&(p_tinib->tskinictxb), p_dominib,
                            sstksz, sstk, ustksz, ustk, pk_ctsk);
#else /* USE_TSKINICTXB */
            p_tinib->sstksz = sstksz;
            p_tinib->sstk = sstk;
            p_tinib->ustksz = ustksz;
            p_tinib->ustk = ustk;
#endif /* USE_TSKINICTXB */
            p_tinib->texatr = TA_NULL;
            p_tinib->texrtn = NULL;

            acptn = default_acptn(domid);
            p_tinib->acvct.acptn1 = acptn;
            p_tinib->acvct.acptn2 = acptn;
            p_tinib->acvct.acptn3 = acptn | rundom;
            p_tinib->acvct.acptn4 = acptn;

            p_tcb->actque = false;
            make_dormant(p_tcb);
            queue_initialize(&(p_tcb->mutex_queue));
            if ((p_tcb->p_tinib->tskatr & TA_ACT) != 0U) {
                make_active(p_tcb);
            }
            ercd = TSKID(p_tcb);
        }
    }
    t_unlock_cpu();

  error_exit:
    LOG_ACRE_TSK_LEAVE(ercd);
    return(ercd);
}
ER_UINT
acre_mpf(const T_CMPF *pk_cmpf)
{
    MPFCB   *p_mpfcb;
    MPFINIB *p_mpfinib;
    ATR     mpfatr;
    void    *mpf;
    MPFMB   *p_mpfmb;
    ER      ercd;

    LOG_ACRE_MPF_ENTER(pk_cmpf);
    CHECK_TSKCTX_UNL();
    CHECK_RSATR(pk_cmpf->mpfatr, TA_TPRI);
    CHECK_PAR(pk_cmpf->blkcnt != 0);
    CHECK_PAR(pk_cmpf->blksz != 0);
    if (pk_cmpf->mpf != NULL) {
        CHECK_PAR(MPF_ALIGN(pk_cmpf->mpf));
    }
    if (pk_cmpf->mpfmb != NULL) {
        CHECK_PAR(MB_ALIGN(pk_cmpf->mpfmb));
    }
    mpfatr = pk_cmpf->mpfatr;
    mpf = pk_cmpf->mpf;
    p_mpfmb = pk_cmpf->mpfmb;

    lock_cpu();
    if (tnum_mpf == 0 || queue_empty(&free_mpfcb)) {
        ercd = E_NOID;
    }
    else {
        if (mpf == NULL) {
            mpf = kernel_malloc(ROUND_MPF_T(pk_cmpf->blksz) * pk_cmpf->blkcnt);
            mpfatr |= TA_MEMALLOC;
        }
        if (mpf == NULL) {
            ercd = E_NOMEM;
        }
        else {
            if (p_mpfmb == NULL) {
                p_mpfmb = kernel_malloc(sizeof(MPFMB) * pk_cmpf->blkcnt);
                mpfatr |= TA_MBALLOC;
            }
            if (p_mpfmb == NULL) {
                if (pk_cmpf->mpf == NULL) {
                    kernel_free(mpf);
                }
                ercd = E_NOMEM;
            }
            else {
                p_mpfcb = ((MPFCB *) queue_delete_next(&free_mpfcb));
                p_mpfinib = (MPFINIB *)(p_mpfcb->p_mpfinib);
                p_mpfinib->mpfatr = mpfatr;
                p_mpfinib->blkcnt = pk_cmpf->blkcnt;
                p_mpfinib->blksz = ROUND_MPF_T(pk_cmpf->blksz);
                p_mpfinib->mpf = mpf;
                p_mpfinib->p_mpfmb = p_mpfmb;

                queue_initialize(&(p_mpfcb->wait_queue));
                p_mpfcb->fblkcnt = p_mpfcb->p_mpfinib->blkcnt;
                p_mpfcb->unused = 0U;
                p_mpfcb->freelist = INDEX_NULL;
                ercd = MPFID(p_mpfcb);
            }
        }
    }
    unlock_cpu();

  error_exit:
    LOG_ACRE_MPF_LEAVE(ercd);
    return(ercd);
}
ER_UINT
acre_tsk(const T_CTSK *pk_ctsk)
{
    TCB     *p_tcb;
    TINIB   *p_tinib;
    ATR     tskatr;
    TASK    task;
    PRI     itskpri;
    size_t  stksz;
    STK_T   *stk;
    ER      ercd;

    LOG_ACRE_TSK_ENTER(pk_ctsk);
    CHECK_TSKCTX_UNL();

    tskatr = pk_ctsk->tskatr;
    task = pk_ctsk->task;
    itskpri = pk_ctsk->itskpri;
    stksz = pk_ctsk->stksz;
    stk = pk_ctsk->stk;

    CHECK_RSATR(tskatr, TA_ACT|TA_NOACTQUE|TARGET_TSKATR);
    CHECK_PAR(FUNC_ALIGN(task));
    CHECK_PAR(FUNC_NONNULL(task));
    CHECK_PAR(VALID_TPRI(itskpri));
    CHECK_PAR(stksz >= TARGET_MIN_STKSZ);
    if (stk != NULL) {
        CHECK_PAR(STKSZ_ALIGN(stksz));
        CHECK_PAR(STACK_ALIGN(stk));
    }

    lock_cpu();
    if (queue_empty(&free_tcb)) {
        ercd = E_NOID;
    }
    else {
        if (stk == NULL) {
            stk = kernel_malloc(ROUND_STK_T(stksz));
            tskatr |= TA_MEMALLOC;
        }
        if (stk == NULL) {
            ercd = E_NOMEM;
        }
        else {
            p_tcb = ((TCB *) queue_delete_next(&free_tcb));
            p_tinib = (TINIB *)(p_tcb->p_tinib);
            p_tinib->tskatr = tskatr;
            p_tinib->exinf = pk_ctsk->exinf;
            p_tinib->task = task;
            p_tinib->ipriority = INT_PRIORITY(itskpri);
#ifdef USE_TSKINICTXB
            init_tskinictxb(&(p_tinib->tskinictxb), stksz, stk);
#else /* USE_TSKINICTXB */
            p_tinib->stksz = stksz;
            p_tinib->stk = stk;
#endif /* USE_TSKINICTXB */
            p_tcb->actque = false;
            make_dormant(p_tcb);
            if ((p_tcb->p_tinib->tskatr & TA_ACT) != 0U) {
                make_active(p_tcb);
            }
            ercd = TSKID(p_tcb);
        }
    }
    unlock_cpu();

  error_exit:
    LOG_ACRE_TSK_LEAVE(ercd);
    return(ercd);
}
ER_UINT
acre_pdq(const T_CPDQ *pk_cpdq)
{
    ID      domid;
    PDQCB   *p_pdqcb;
    PDQINIB *p_pdqinib;
    ATR     pdqatr;
    PDQMB   *p_pdqmb;
    ACPTN   acptn;
    ER      ercd;

    LOG_ACRE_PDQ_ENTER(pk_cpdq);
    CHECK_TSKCTX_UNL();
    CHECK_MACV_READ(pk_cpdq, T_CPDQ);
    CHECK_RSATR(pk_cpdq->pdqatr, TA_TPRI|TA_DOMMASK);
    domid = get_atrdomid(pk_cpdq->pdqatr);
    CHECK_ATRDOMID_INACTIVE(domid);
    CHECK_DPRI(pk_cpdq->maxdpri);
    if (pk_cpdq->pdqmb != NULL) {
        CHECK_ALIGN_MB(pk_cpdq->pdqmb);
        CHECK_OBJ(valid_memobj_kernel(pk_cpdq->pdqmb,
                                      sizeof(PDQMB) * pk_cpdq->pdqcnt));
    }
    CHECK_ACPTN(sysstat_acvct.acptn3);
    pdqatr = pk_cpdq->pdqatr;
    p_pdqmb = pk_cpdq->pdqmb;

    t_lock_cpu();
    if (tnum_pdq == 0 || queue_empty(&free_pdqcb)) {
        ercd = E_NOID;
    }
    else {
        if (pk_cpdq->pdqcnt != 0 && p_pdqmb == NULL) {
            p_pdqmb = kernel_malloc(sizeof(PDQMB) * pk_cpdq->pdqcnt);
            pdqatr |= TA_MBALLOC;
        }
        if (pk_cpdq->pdqcnt != 0 && p_pdqmb == NULL) {
            ercd = E_NOMEM;
        }
        else {
            p_pdqcb = ((PDQCB *) queue_delete_next(&free_pdqcb));
            p_pdqinib = (PDQINIB *)(p_pdqcb->p_pdqinib);
            p_pdqinib->pdqatr = pdqatr;
            p_pdqinib->pdqcnt = pk_cpdq->pdqcnt;
            p_pdqinib->maxdpri = pk_cpdq->maxdpri;
            p_pdqinib->p_pdqmb = p_pdqmb;

            acptn = default_acptn(domid);
            p_pdqinib->acvct.acptn1 = acptn;
            p_pdqinib->acvct.acptn2 = acptn;
            p_pdqinib->acvct.acptn3 = acptn | rundom;
            p_pdqinib->acvct.acptn4 = acptn;

            queue_initialize(&(p_pdqcb->swait_queue));
            queue_initialize(&(p_pdqcb->rwait_queue));
            p_pdqcb->count = 0U;
            p_pdqcb->p_head = NULL;
            p_pdqcb->unused = 0U;
            p_pdqcb->p_freelist = NULL;
            ercd = PDQID(p_pdqcb);
        }
    }
    t_unlock_cpu();

  error_exit:
    LOG_ACRE_PDQ_LEAVE(ercd);
    return(ercd);
}
status_t port_create (char * name, int32_t queue_length, int32_t * p_id)

/*
 * ARGUMENTS
 * * name : the name of the port.
 * * queue_length : the length of its queue.
 *
 * RESULT
 * * DNA_BAD_ARGUMENT: one of the arguments is invalid
 * * DNA_NO_MORE_PORT: no more ports available
 * * DNA_OUT_OF_MEM: cannot allocate memory to create a port
 * * DNA_OK: the operation succeeded
 *
 * SOURCE
 */

{
  int16_t index;
  port_t port = NULL;
  status_t status;
  interrupt_status_t it_status = 0;

  watch (status_t)
  {
    ensure (name != NULL && p_id != NULL, DNA_BAD_ARGUMENT);
    ensure (queue_length > 0, DNA_BAD_ARGUMENT);

    it_status = cpu_trap_mask_and_backup ();
    lock_acquire (& port_pool . lock);

    /*
     * Get an empty port slot.
     */

    port = queue_rem (& port_pool . port);
    check (pool_error, port != NULL, DNA_NO_MORE_PORT);

    /*
     * Make the place clean.
     */

    index = port -> id . s . index;
    dna_memset (port, 0, sizeof (struct _port));

    port -> id . s . index = index;
    port -> id . s . value = port_pool . counter;
    port_pool . counter += 1;

    lock_release (& port_pool . lock);
    cpu_trap_restore (it_status);

    /*
     * Creating the messages.
     */

    port -> data = kernel_malloc (sizeof (struct _message) * queue_length, true);
    check (no_mem, port -> data != NULL, DNA_OUT_OF_MEM);

    for (int32_t i = 0; i < queue_length; i += 1)
    {
      queue_add (& port -> message, & port -> data[i]);
    }

    /*
     * Creating the semaphores.
     */

    status = semaphore_create (name, queue_length, & port -> write_sem);
    check (wsem_error, status == DNA_OK, status);

    status = semaphore_create (name, 0, & port -> read_sem);
    check (rsem_error, status == DNA_OK, status);

    dna_strcpy (port -> info . name, name);
    port -> info . capacity = queue_length;

    /*
     * Return the port information.
     */

    *p_id = port -> id . raw;
    return DNA_OK;
  }

  rescue (rsem_error)
  {
    semaphore_destroy (port -> write_sem);
  }

  rescue (wsem_error)
  {
    kernel_free (port -> data);
  }

  rescue (no_mem)
  {
    it_status = cpu_trap_mask_and_backup ();
    lock_acquire (& port_pool . lock);
    queue_add (& port_pool . port, port);
  }

  rescue (pool_error)
  {
    lock_release (& port_pool . lock);
    cpu_trap_restore (it_status);
    leave;
  }
}
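A minimal usage sketch of the API above, assuming only the signature and status codes shown; the port name and queue length are illustrative:

void port_create_example (void)
{
  int32_t port_id;
  status_t status = port_create ("com_port", 16, & port_id);

  if (status == DNA_OK)
  {
    /* port_id now identifies a port with a 16-message queue. */
  }
  /* Otherwise: DNA_BAD_ARGUMENT, DNA_NO_MORE_PORT, or DNA_OUT_OF_MEM. */
}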