/*
 * Return a pointer to the next free element slot in the block list,
 * growing the list with a fresh segment when required (sized per the
 * initialization values given to bl_create()).  Returns NULL for an
 * invalid handle or when no new segment could be allocated.
 */
char *
bl_next_avail(int list_handle)
{
	struct blk_list_cs *list;
	char *slot;

	if (invalid_handle(list_handle))
		return (NULL);

	list = bl_cs_array[list_handle];

	/*
	 * Grow the list when nothing has been allocated yet, or when the
	 * previous access consumed the last slot of the current segment.
	 */
	if (list->cur_segment == NULL || list->cur_segment->full) {
		if (!alloc_next_seg(list))
			return (NULL);
	}

	slot = list->cur_segment->avail_ptr;

	/* Bump the free pointer past this slot and account for the element. */
	list->cur_segment->avail_ptr += list->struct_size;
	list->total_elem++;

	/* Mark the segment exhausted once the free pointer reaches its end. */
	if (list->cur_segment->avail_ptr >= list->cur_segment->eoseg_ptr)
		list->cur_segment->full = 1;

	return (slot);
}
/*
 * Request cancellation of the given thread.  Marks the target's
 * descriptor as canceled and, unless the target has cancellation
 * disabled or was already canceled, wakes it either with restart()
 * (when its extrication interface dequeued it from a wait queue) or
 * by delivering the cancel signal to its kernel process.
 * Returns 0 on success or ESRCH for a stale/invalid handle.
 */
int pthread_cancel(pthread_t thread)
{
  pthread_handle handle = thread_handle(thread);
  int pid;
  int dorestart = 0;
  pthread_descr th;
  pthread_extricate_if *pextricate;
  int already_canceled;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }

  th = handle->h_descr;

  /* Record whether a previous cancel already ran, then set the flag
     unconditionally so the target sees it at its next check. */
  already_canceled = th->p_canceled;
  th->p_canceled = 1;

  /* Nothing further to do if the target disabled cancellation or an
     earlier pthread_cancel already performed the wakeup below. */
  if (th->p_cancelstate == PTHREAD_CANCEL_DISABLE || already_canceled) {
    __pthread_unlock(&handle->h_lock);
    return 0;
  }

  /* Snapshot these under the lock; they are used after unlocking. */
  pextricate = th->p_extricate;
  pid = th->p_pid;

  /* If the thread has registered an extrication interface, then invoke
     the interface. If it returns 1, then we succeeded in dequeuing the
     thread from whatever waiting object it was enqueued with. In that
     case, it is our responsibility to wake it up.
     And also to set the p_woken_by_cancel flag so the woken thread
     can tell that it was woken by cancellation. */
  if (pextricate != NULL) {
    dorestart = pextricate->pu_extricate_func(pextricate->pu_object, th);
    th->p_woken_by_cancel = dorestart;
  }

  __pthread_unlock(&handle->h_lock);

  /* If the thread has suspended or is about to, then we unblock it by
     issuing a restart, instead of a cancel signal. Otherwise we send
     the cancel signal to unblock the thread from a cancellation point,
     or to initiate asynchronous cancellation. The restart is needed so
     we have proper accounting of restarts; suspend decrements the
     thread's resume count, and restart() increments it.  This also
     means that suspend's handling of the cancel signal is obsolete. */
  if (dorestart)
    restart(th);
  else
    kill(pid, __pthread_sig_cancel);

  return 0;
}
/*
 * Relinquish the array back to the memory pool: release the data list
 * first, then the array's own list entry.  Note that there is no method
 * provided here to free *all* arrays at once.
 */
void
ar_free(int list_handle)
{
	if (invalid_handle(list_handle))
		return;

	/* Data storage goes first, then the bookkeeping entry itself. */
	bl_free(bl_cs_array[list_handle]->data_handle);
	bl_free(list_handle);
}
/*
 * Validate a (list_handle, recno) pair.  Returns 1 when either the
 * handle or the record number is out of range, 0 when both are valid.
 *
 * NOTE(review): record numbers appear to be 0-based while total_elem is
 * a count of elements (bl_next_avail increments it once per allocated
 * slot), so the valid range is 0 .. total_elem - 1.  The original test
 * "recno > total_elem" accepted recno == total_elem, one past the last
 * record; tightened to ">=".  Confirm no caller relied on 1-based recno.
 */
static int
invalid_record(int list_handle, int recno)
{
	if (invalid_handle(list_handle))
		return (1);
	/* Valid records are 0 .. total_elem - 1. */
	if (recno < 0 || recno >= bl_cs_array[list_handle]->total_elem)
		return (1);
	return (0);
}
// Display an informational message in the status bar.  A message with a
// nonzero timeout removes itself, so the caller receives the invalid
// handle; a permanent message yields a handle the caller may use to
// remove it later.
Gobby::StatusBar::MessageHandle
Gobby::StatusBar::add_info_message(const Glib::ustring& message,
                                   unsigned int timeout)
{
	const MessageHandle handle = add_message(INFO, message, "", timeout);

	// Caller is not allowed to hold on to handles to messages that we
	// are going to delete anyway.
	return timeout ? invalid_handle() : handle;
}
/*
 * Open the serial device at path in non-blocking raw mode and capture
 * its current termios settings (so serial_close() can restore them).
 * Returns 0 on success, -1 on failure; on failure the handle is left
 * in the invalid state.
 */
int serial_open(serial_handle_t* h, const char* path)
{
  /* Start from a known-invalid handle state. */
  invalid_handle(h);

  if ((h->fd = open(path, O_RDWR | O_NONBLOCK | O_NOCTTY)) == -1)
  {
    /* errno is a signed int: print with %d, not %u. */
    DEBUG_ERROR("open() == %d\n", errno);
    return -1;
  }

  if (tcgetattr(h->fd, &h->termios) == -1)
  {
    DEBUG_ERROR("tcgetattr() == %d\n", errno);
    goto on_error;
  }

#if _DEBUG
  /* Keep the device name for diagnostics only; strdup failure is
     reported but non-fatal. */
  h->name = strdup(path);
  if (h->name == NULL)
    DEBUG_ERROR("h->name == NULL\n");
#endif

  /* success */
  return 0;

 on_error:
  close(h->fd);
  invalid_handle(h);
  return -1;
}
/*
 * Close a serial device previously opened with serial_open(): restore
 * the saved termios settings, close the descriptor, and reset the
 * handle to the invalid state.  Assumes h->fd is valid.
 */
void serial_close(serial_handle_t* h)
{
#if _DEBUG
  /* free(NULL) is a no-op, so the previous NULL guard was redundant. */
  free(h->name);
#endif

  tcsetattr(h->fd, TCSANOW, &h->termios);
  close(h->fd);
  invalid_handle(h);
}
/*
 * Deliver signal signo to the kernel process backing the given thread.
 * Returns 0 on success, ESRCH for a stale/invalid handle, or the errno
 * reported by kill() on delivery failure.
 */
int pthread_kill(pthread_t thread, int signo)
{
  pthread_handle handle = thread_handle(thread);
  int target_pid;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  /* Snapshot the pid under the lock, then drop the lock before kill(). */
  target_pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);

  return kill(target_pid, signo) == -1 ? errno : 0;
}
/*
 * Spinlock-based variant: deliver signal signo to the kernel process
 * backing the given thread.  Returns 0 on success, ESRCH for a stale
 * handle, or the errno reported by kill().
 */
int pthread_kill(pthread_t thread, int signo)
{
  pthread_handle handle = thread_handle(thread);
  int target_pid;

  acquire(&handle->h_spinlock);
  if (invalid_handle(handle, thread)) {
    release(&handle->h_spinlock);
    return ESRCH;
  }
  /* Snapshot the pid under the lock, then release before kill(). */
  target_pid = handle->h_descr->p_pid;
  release(&handle->h_spinlock);

  return kill(target_pid, signo) == -1 ? errno : 0;
}
/*
 * Spinlock-based pthread_join: wait for thread_id to terminate, fetch
 * its return value into *thread_return (if non-NULL), and notify the
 * manager thread to free the terminated thread's resources.
 * Returns 0 on success, ESRCH for a stale handle, EDEADLK for a
 * self-join, EINVAL if the target is detached or already being joined.
 */
int pthread_join(pthread_t thread_id, void ** thread_return)
{
  volatile pthread_descr self = thread_self();
  struct pthread_request request;
  pthread_handle handle = thread_handle(thread_id);
  pthread_descr th;

  acquire(&handle->h_spinlock);
  if (invalid_handle(handle, thread_id)) {
    release(&handle->h_spinlock);
    return ESRCH;
  }
  th = handle->h_descr;
  /* A thread cannot join itself. */
  if (th == self) {
    release(&handle->h_spinlock);
    return EDEADLK;
  }
  /* If detached or already joined, error */
  if (th->p_detached || th->p_joining != NULL) {
    release(&handle->h_spinlock);
    return EINVAL;
  }
  /* If not terminated yet, suspend ourselves. */
  if (! th->p_terminated) {
    /* Record ourselves as the joiner, then sleep until the target
       terminates and restarts us (or until we are cancelled). */
    th->p_joining = self;
    release(&handle->h_spinlock);
    suspend_with_cancellation(self);
    /* This is a cancellation point */
    if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
      /* NOTE(review): p_joining is cleared here without holding the
         spinlock — presumably tolerated by this implementation, but
         worth verifying against concurrent joiners/terminators. */
      th->p_joining = NULL;
      pthread_exit(PTHREAD_CANCELED);
    }
    acquire(&handle->h_spinlock);
  }
  /* Get return value */
  if (thread_return != NULL)
    *thread_return = th->p_retval;
  release(&handle->h_spinlock);
  /* Send notification to thread manager */
  if (__pthread_manager_request >= 0) {
    request.req_thread = self;
    request.req_kind = REQ_FREE;
    request.req_args.free.thread = th;
    write(__pthread_manager_request, (char *) &request, sizeof(request));
  }
  return 0;
}
/*
 * Free one block list, or every allocated list when list_handle is -1.
 * Invalid handles (other than the -1 wildcard) are ignored.
 */
void
bl_free(int list_handle)
{
	int h;

	if (list_handle != -1) {
		if (invalid_handle(list_handle))
			return;
		free_list(list_handle);
		return;
	}

	/* -1 is the "free everything" wildcard. */
	for (h = 0; h < next_array_elem; h++)
		free_list(h);
}
/*
 * Fetch the scheduling policy and parameters of the kernel process
 * backing the given thread.  Returns 0 on success, ESRCH for a stale
 * handle, or the errno reported by the scheduler queries.
 */
int pthread_getschedparam(pthread_t thread, int *policy,
                          struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  int target_pid;
  int current_policy;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  /* Snapshot the pid under the lock; the queries run unlocked. */
  target_pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);

  current_policy = sched_getscheduler(target_pid);
  if (current_policy == -1)
    return errno;
  if (sched_getparam(target_pid, param) == -1)
    return errno;

  /* Only report the policy once both queries have succeeded. */
  *policy = current_policy;
  return 0;
}
/*
 * Apply a scheduling policy and parameters to the kernel process
 * backing the given thread, and mirror the priority in the thread
 * descriptor.  Returns 0 on success, ESRCH for a stale handle, or the
 * errno reported by sched_setscheduler().
 */
int pthread_setschedparam(pthread_t thread, int policy,
                          const struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  pthread_descr target;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  target = handle->h_descr;

  if (sched_setscheduler(target->p_pid, policy, param) == -1) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }

  /* SCHED_OTHER threads all record priority 0. */
  target->p_priority =
      (policy == SCHED_OTHER) ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);

  /* Let the manager thread adjust its own priority accordingly. */
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(target->p_priority);
  return 0;
}
/*
 * Mark a thread as detached so its resources are reclaimed as soon as
 * it terminates.  Returns ESRCH for a stale handle, EINVAL if already
 * detached, and 0 otherwise (including when a join is in progress, in
 * which case the detach request is silently dropped).
 */
int pthread_detach(pthread_t thread_id)
{
  struct pthread_request request;
  pthread_handle handle = thread_handle(thread_id);
  pthread_descr target;
  int was_terminated;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread_id)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  target = handle->h_descr;

  /* Detaching twice is an error. */
  if (target->p_detached) {
    __pthread_unlock(&handle->h_lock);
    return EINVAL;
  }
  /* A joiner is already waiting; leave the thread joinable for it. */
  if (target->p_joining != NULL) {
    __pthread_unlock(&handle->h_lock);
    return 0;
  }

  target->p_detached = 1;
  was_terminated = target->p_terminated;
  __pthread_unlock(&handle->h_lock);

  /* If the thread already finished, ask the manager to reclaim it now. */
  if (was_terminated && __pthread_manager_request >= 0) {
    request.req_thread = thread_self();
    request.req_kind = REQ_FREE;
    request.req_args.free.thread_id = thread_id;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
  }
  return 0;
}
/*
 * Release the resources of a thread whose handle is being discarded.
 * If the thread already exited, free it immediately; otherwise mark it
 * detached so the manager deallocates it when its Unix process exits.
 * A stale handle means pthread_reap_children already cleaned up.
 */
static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr descr;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already;
       nothing needs to be done. */
    __pthread_unlock(&handle->h_lock);
    return;
  }

  descr = handle->h_descr;
  if (!descr->p_exited) {
    /* The Unix process of the thread is still running.  Mark the
       thread as detached so that the thread manager will deallocate
       its resources when the Unix process exits. */
    descr->p_detached = 1;
    __pthread_unlock(&handle->h_lock);
    return;
  }

  __pthread_unlock(&handle->h_lock);
  pthread_free(descr);
}
/*
 * Extrication-based pthread_join: wait for thread_id to terminate and
 * collect its return value.  An extrication interface is registered so
 * that a cancellation arriving while we sleep can dequeue us from the
 * target's joiner slot before we are woken.
 * Returns 0 on success, ESRCH for a stale handle, EDEADLK for a
 * self-join, EINVAL if the target is detached or already being joined.
 */
int pthread_join(pthread_t thread_id, void ** thread_return)
{
  volatile pthread_descr self = thread_self();
  struct pthread_request request;
  pthread_handle handle = thread_handle(thread_id);
  pthread_descr th;
  pthread_extricate_if extr;
  int already_canceled = 0;

  PDEBUG("\n");

  /* Set up extrication interface */
  extr.pu_object = handle;
  extr.pu_extricate_func = join_extricate_func;

  __pthread_lock(&handle->h_lock, self);
  if (invalid_handle(handle, thread_id)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  /* A thread cannot join itself. */
  if (th == self) {
    __pthread_unlock(&handle->h_lock);
    return EDEADLK;
  }
  /* If detached or already joined, error */
  if (th->p_detached || th->p_joining != NULL) {
    __pthread_unlock(&handle->h_lock);
    return EINVAL;
  }
  /* If not terminated yet, suspend ourselves. */
  if (! th->p_terminated) {
    /* Register extrication interface */
    __pthread_set_own_extricate_if(self, &extr);
    /* Only record ourselves as the joiner if no cancellation is
       already pending; otherwise exit below without ever suspending. */
    if (!(THREAD_GETMEM(self, p_canceled)
        && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
      th->p_joining = self;
    else
      already_canceled = 1;
    __pthread_unlock(&handle->h_lock);

    if (already_canceled) {
      __pthread_set_own_extricate_if(self, 0);
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    }

    PDEBUG("before suspend\n");
    suspend(self);
    PDEBUG("after suspend\n");
    /* Deregister extrication interface */
    __pthread_set_own_extricate_if(self, 0);

    /* This is a cancellation point */
    if (THREAD_GETMEM(self, p_woken_by_cancel)
        && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
      THREAD_SETMEM(self, p_woken_by_cancel, 0);
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    }
    __pthread_lock(&handle->h_lock, self);
  }
  /* Get return value */
  if (thread_return != NULL)
    *thread_return = th->p_retval;
  __pthread_unlock(&handle->h_lock);
  /* Send notification to thread manager */
  if (__pthread_manager_request >= 0) {
    request.req_thread = self;
    request.req_kind = REQ_FREE;
    request.req_args.free.thread_id = thread_id;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
  }
  return 0;
}