status_t semaphore_destroy (int32_t id)

/*
 * ARGUMENTS
 * * id : the semaphore id.
 *
 * RESULT
 * * DNA_BAD_SEM_ID: the id parameter is invalid
 * * DNA_INVOKE_SCHEDULER: the operation succeeded and a rescheduling is necessary
 * * DNA_OK: the operation succeeded
 *
 * SOURCE
 */

{
  thread_t thread = NULL;
  semaphore_t sem = NULL;
  semaphore_id_t sid = { .raw = id };
  interrupt_status_t it_status = 0;
  bool smart_to_reschedule = false;
  status_t status;

  watch (status_t)
  {
    ensure (sid . s . index < DNA_MAX_SEM, DNA_BAD_SEM_ID);

    it_status = cpu_trap_mask_and_backup ();
    lock_acquire (& semaphore_pool . lock);

    /*
     * Look for the semaphore with ID id. If found,
     * remove its entry from the pool.
     */

    sem = semaphore_pool . semaphore[sid . s . index];
    check (invalid_semaphore, sem != NULL, DNA_BAD_SEM_ID);
    check (invalid_semaphore, sem -> id . raw == sid . raw, DNA_BAD_SEM_ID);

    semaphore_pool . semaphore[sid . s . index] = NULL;

    lock_acquire (& sem -> lock);
    lock_release (& semaphore_pool . lock);

    /*
     * Reschedule each waiting thread, and
     * reset its information.
     */

    lock_acquire (& sem -> waiting_queue . lock);

    while ((thread = queue_rem (& sem -> waiting_queue)) != NULL)
    {
      lock_acquire (& thread -> lock);

      thread -> info . sem_tokens = 0;
      thread -> info . resource = DNA_NO_RESOURCE;
      thread -> info . resource_id = -1;

      if (thread -> info . status == DNA_THREAD_WAITING)
      {
        thread -> info . status = DNA_THREAD_READY;
        status = scheduler_dispatch (thread);

        smart_to_reschedule = smart_to_reschedule
          || (status == DNA_INVOKE_SCHEDULER);
      }

      lock_release (& thread -> lock);
    }

    lock_release (& sem -> waiting_queue . lock);
    lock_release (& sem -> lock);

    cpu_trap_restore (it_status);

    /*
     * Delete the semaphore's memory.
     */

    kernel_free (sem);

    return smart_to_reschedule ? DNA_INVOKE_SCHEDULER : DNA_OK;
  }

  rescue (invalid_semaphore)
  {
    lock_release (& semaphore_pool . lock);
    cpu_trap_restore (it_status);
    leave;
  }
}
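/*
 * Usage sketch (illustrative only): semaphore_create is assumed to take
 * a name, an initial token count and an out-parameter for the new id,
 * as it is used in port_create below. A DNA_INVOKE_SCHEDULER result
 * means that a waiting thread was made ready and the caller should let
 * the scheduler run.
 */

status_t example_semaphore_teardown (void)
{
  int32_t sem_id = -1;
  status_t status = semaphore_create ("example_sem", 1, & sem_id);

  if (status != DNA_OK) return status;

  status = semaphore_destroy (sem_id);

  if (status == DNA_INVOKE_SCHEDULER)
  {
    /* At least one waiter was rescheduled; yield when convenient. */
    status = DNA_OK;
  }

  return status;
}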
ER_UINT acre_mpf(const T_CMPF *pk_cmpf)
{
	MPFCB	*p_mpfcb;
	MPFINIB	*p_mpfinib;
	ATR		mpfatr;
	void	*mpf;
	MPFMB	*p_mpfmb;
	ER		ercd;

	LOG_ACRE_MPF_ENTER(pk_cmpf);
	CHECK_TSKCTX_UNL();
	CHECK_RSATR(pk_cmpf->mpfatr, TA_TPRI);
	CHECK_PAR(pk_cmpf->blkcnt != 0);
	CHECK_PAR(pk_cmpf->blksz != 0);
	if (pk_cmpf->mpf != NULL) {
		CHECK_PAR(MPF_ALIGN(pk_cmpf->mpf));
	}
	if (pk_cmpf->mpfmb != NULL) {
		CHECK_PAR(MB_ALIGN(pk_cmpf->mpfmb));
	}
	mpfatr = pk_cmpf->mpfatr;
	mpf = pk_cmpf->mpf;
	p_mpfmb = pk_cmpf->mpfmb;

	lock_cpu();
	if (tnum_mpf == 0 || queue_empty(&free_mpfcb)) {
		ercd = E_NOID;
	}
	else {
		if (mpf == NULL) {
			mpf = kernel_malloc(ROUND_MPF_T(pk_cmpf->blksz) * pk_cmpf->blkcnt);
			mpfatr |= TA_MEMALLOC;
		}
		if (mpf == NULL) {
			ercd = E_NOMEM;
		}
		else {
			if (p_mpfmb == NULL) {
				p_mpfmb = kernel_malloc(sizeof(MPFMB) * pk_cmpf->blkcnt);
				mpfatr |= TA_MBALLOC;
			}
			if (p_mpfmb == NULL) {
				if (pk_cmpf->mpf == NULL) {
					kernel_free(mpf);
				}
				ercd = E_NOMEM;
			}
			else {
				p_mpfcb = ((MPFCB *) queue_delete_next(&free_mpfcb));
				p_mpfinib = (MPFINIB *)(p_mpfcb->p_mpfinib);
				p_mpfinib->mpfatr = mpfatr;
				p_mpfinib->blkcnt = pk_cmpf->blkcnt;
				p_mpfinib->blksz = ROUND_MPF_T(pk_cmpf->blksz);
				p_mpfinib->mpf = mpf;
				p_mpfinib->p_mpfmb = p_mpfmb;
				queue_initialize(&(p_mpfcb->wait_queue));
				p_mpfcb->fblkcnt = p_mpfcb->p_mpfinib->blkcnt;
				p_mpfcb->unused = 0U;
				p_mpfcb->freelist = INDEX_NULL;
				ercd = MPFID(p_mpfcb);
			}
		}
	}
	unlock_cpu();

  error_exit:
	LOG_ACRE_MPF_LEAVE(ercd);
	return(ercd);
}
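/*
 * Usage sketch (illustrative only): acre_mpf must be called from task
 * context with the CPU unlocked. Passing NULL for both mpf and mpfmb
 * lets the kernel allocate the block area and the management area
 * itself; the block count and block size chosen here are arbitrary.
 */

void example_create_mpf(void)
{
	T_CMPF	cmpf;
	ER_UINT	mpfid;

	cmpf.mpfatr = TA_NULL;	/* FIFO-ordered wait queue */
	cmpf.blkcnt = 16U;		/* sixteen fixed-size blocks */
	cmpf.blksz = 64U;		/* sixty-four bytes per block */
	cmpf.mpf = NULL;		/* block area allocated by the kernel */
	cmpf.mpfmb = NULL;		/* management area allocated by the kernel */

	mpfid = acre_mpf(&cmpf);
	if (mpfid < 0) {
		/* E_PAR, E_RSATR, E_NOID or E_NOMEM: creation failed. */
	}
}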
status_t file_put (int16_t fd)

/*
 * ARGUMENTS
 * * fd : the file descriptor.
 *
 * FUNCTION
 * Checks whether the fd entry exists in the current group pool. If it does,
 * decrements its usage counter, and deletes the file when the counter reaches 0.
 *
 * RESULT
 * * DNA_INVALID_FD if fd is not a valid file
 * * DNA_OK if the operation succeeded
 *
 * SOURCE
 */

{
  file_t file;
  int32_t tid;
  thread_info_t info;
  status_t status = DNA_OK;
  interrupt_status_t it_status = 0;
  vnode_t vnode = NULL;

  watch (status_t)
  {
    status = thread_find (NULL, & tid);
    ensure (status == DNA_OK, status);

    status = thread_get_info (tid, & info);
    ensure (status == DNA_OK, status);

    ensure (info . group >= 0, DNA_BAD_ARGUMENT);
    ensure (info . group < DNA_MAX_GROUP, DNA_BAD_ARGUMENT);

    /*
     * Look for the file in the pool.
     */

    it_status = cpu_trap_mask_and_backup ();
    lock_acquire (& file_pool . lock);

    file = file_pool . file[info . group][fd];
    check (error, file != NULL, DNA_INVALID_FD);
    check (error, file -> usage_counter > 0, DNA_ERROR);

    atomic_add (& file -> usage_counter, -1);

    if (file -> usage_counter == 0 && file -> destroy)
    {
      file_pool . file[info . group][fd] = NULL;

      lock_release (& file_pool . lock);
      cpu_trap_restore (it_status);

      status = file -> vnode -> volume -> cmd -> free
        (file -> vnode -> volume -> data, file -> vnode -> data, file -> data);
      panic (status != DNA_OK);

      vnode = file -> vnode;
      kernel_free (file);

      status = vnode_put (vnode -> volume -> id, vnode -> id);
      panic (status != DNA_OK);
    }
    else
    {
      lock_release (& file_pool . lock);
      cpu_trap_restore (it_status);
    }

    dna_log (VERBOSE_LEVEL, "Put FD %d.", fd);
    return DNA_OK;
  }

  rescue (error)
  {
    lock_release (& file_pool . lock);
    cpu_trap_restore (it_status);
    leave;
  }
}
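/*
 * Usage sketch (illustrative only): every successful acquisition of a
 * descriptor must eventually be balanced by one file_put so that the
 * usage counter can drop to zero and the file be reclaimed. How the
 * descriptor was obtained is left out here.
 */

status_t example_release_fd (int16_t fd)
{
  status_t status = file_put (fd);

  if (status == DNA_INVALID_FD)
  {
    /* fd is not a valid descriptor of the current group's pool. */
  }

  return status;
}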
status_t interrupt_detach (int32_t cpuid, interrupt_id_t id,
    interrupt_handler_t handler)

/*
 * ARGUMENTS
 * * cpuid : id of the target CPU
 * * id : an interrupt ID
 * * handler : handler of the interrupt
 *
 * RESULT
 * * DNA_BAD_ARGUMENT: one of the arguments is incorrect
 * * DNA_OK: the operation is successful
 *
 * SOURCE
 */

{
  isr_t isr = NULL;
  queue_t * queue = NULL;
  interrupt_status_t it_status = 0;

  watch (status_t)
  {
    ensure (id < CPU_TRAP_COUNT, DNA_BAD_ARGUMENT);
    ensure (cpuid < cpu_mp_count (), DNA_BAD_ARGUMENT);

    /*
     * Remove the ISR from the appropriate queue.
     */

    it_status = cpu_trap_mask_and_backup ();

    queue = & cpu_pool . cpu[cpuid] . isr[id];
    lock_acquire (& queue -> lock);

    isr = queue_lookup (queue, interrupt_handler_inspector, handler);
    check (no_isr, isr != NULL, DNA_BAD_ARGUMENT);

    queue_extract (queue, isr);

    /*
     * If there is no more handler for the specified
     * interrupt, disable it.
     */

    if (queue -> status == 0)
    {
      if (cpuid == cpu_mp_id ())
      {
        cpu_trap_disable (id);
      }
      else
      {
        lock_acquire (& cpu_pool . cpu[cpuid] . ipi_lock);
        cpu_mp_send_ipi (cpuid, DNA_IPI_TRAP_DISABLE, (void *) id);
      }
    }

    lock_release (& queue -> lock);
    cpu_trap_restore (it_status);

    kernel_free (isr);
    return DNA_OK;
  }

  rescue (no_isr)
  {
    lock_release (& queue -> lock);
    cpu_trap_restore (it_status);
    leave;
  }
}
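/*
 * Usage sketch (illustrative only): my_timer_isr is a hypothetical
 * handler that was previously registered with the matching attach call;
 * its prototype is only assumed to be compatible with
 * interrupt_handler_t, and interrupt line 3 is arbitrary.
 */

extern int32_t my_timer_isr (void * data);

status_t example_detach_timer (void)
{
  status_t status = interrupt_detach (cpu_mp_id (), 3,
      (interrupt_handler_t) my_timer_isr);

  if (status != DNA_OK)
  {
    /* The handler was not attached to this CPU and interrupt line. */
  }

  return status;
}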
status_t port_create (char * name, int32_t queue_length, int32_t * p_id)

/*
 * ARGUMENTS
 * * name : the name of the port.
 * * queue_length : the length of its queue.
 * * p_id : the placeholder of the new port's id.
 *
 * RESULT
 * * DNA_BAD_ARGUMENT: one of the arguments is invalid
 * * DNA_NO_MORE_PORT: no more port available
 * * DNA_OUT_OF_MEM: cannot allocate memory to create a port
 * * DNA_OK: the operation succeeded
 *
 * SOURCE
 */

{
  int16_t index;
  port_t port = NULL;
  status_t status;
  interrupt_status_t it_status = 0;

  watch (status_t)
  {
    ensure (name != NULL && p_id != NULL, DNA_BAD_ARGUMENT);
    ensure (queue_length > 0, DNA_BAD_ARGUMENT);

    it_status = cpu_trap_mask_and_backup ();
    lock_acquire (& port_pool . lock);

    /*
     * Get an empty port slot.
     */

    port = queue_rem (& port_pool . port);
    check (pool_error, port != NULL, DNA_NO_MORE_PORT);

    /*
     * Make the place clean.
     */

    index = port -> id . s . index;
    dna_memset (port, 0, sizeof (struct _port));

    port -> id . s . index = index;
    port -> id . s . value = port_pool . counter;
    port_pool . counter += 1;

    lock_release (& port_pool . lock);
    cpu_trap_restore (it_status);

    /*
     * Creating the messages.
     */

    port -> data = kernel_malloc (sizeof (struct _message) * queue_length, true);
    check (no_mem, port -> data != NULL, DNA_OUT_OF_MEM);

    for (int32_t i = 0; i < queue_length; i += 1)
    {
      queue_add (& port -> message, & port -> data[i]);
    }

    /*
     * Creating the semaphores.
     */

    status = semaphore_create (name, queue_length, & port -> write_sem);
    check (wsem_error, status == DNA_OK, status);

    status = semaphore_create (name, 0, & port -> read_sem);
    check (rsem_error, status == DNA_OK, status);

    dna_strcpy (port -> info . name, name);
    port -> info . capacity = queue_length;

    /*
     * Return the port information.
     */

    *p_id = port -> id . raw;
    return DNA_OK;
  }

  rescue (rsem_error)
  {
    semaphore_destroy (port -> write_sem);
  }

  rescue (wsem_error)
  {
    kernel_free (port -> data);
  }

  rescue (no_mem)
  {
    it_status = cpu_trap_mask_and_backup ();
    lock_acquire (& port_pool . lock);
    queue_add (& port_pool . port, port);
  }

  rescue (pool_error)
  {
    lock_release (& port_pool . lock);
    cpu_trap_restore (it_status);
    leave;
  }
}
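/*
 * Usage sketch (illustrative only): create a port able to buffer eight
 * messages and hand its id back to the caller. The name and capacity
 * are arbitrary; sending and receiving through the port is left out.
 */

status_t example_create_port (int32_t * p_port_id)
{
  status_t status = port_create ("example_port", 8, p_port_id);

  if (status != DNA_OK)
  {
    /* DNA_BAD_ARGUMENT, DNA_NO_MORE_PORT or DNA_OUT_OF_MEM. */
    return status;
  }

  return DNA_OK;
}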