/* Restart every thread previously stopped by suspendAllThreads().
   Must be called by the thread (self) that performed the suspension;
   the global thread-list lock serialises this against other
   suspend/resume callers. */
void resumeAllThreads(Thread *self) {
    Thread *thread;

    TRACE("Thread 0x%x id: %d is resuming all threads\n", self, self->id);
    pthread_mutex_lock(&lock);

    /* Pass 1: clear each thread's suspend flag and signal any thread
       that is parked inside the signal-driven suspend loop */
    for(thread = &main_thread; thread != NULL; thread = thread->next) {
        if(thread == self)
            continue;

        thread->suspend = FALSE;

        /* The cleared flag must be visible before the signal is sent */
        MBARRIER();

        if(!thread->blocking) {
            TRACE("Sending resume signal to thread 0x%x id: %d\n", thread, thread->id);
            pthread_kill(thread->tid, SIGUSR1);
        }
    }

    /* Pass 2: wait until every thread has actually left the SUSPENDED
       state (no self-skip needed -- self is running, never SUSPENDED) */
    for(thread = &main_thread; thread != NULL; thread = thread->next) {
        while(thread->state == SUSPENDED) {
            TRACE("Waiting for thread 0x%x id: %d to resume\n", thread, thread->id);
            sched_yield();
        }
    }

    all_threads_suspended = FALSE;

    /* Threads created while all threads were suspended were held back
       at startup; release them now */
    if(threads_waiting_to_start) {
        TRACE("%d threads waiting to start...\n", threads_waiting_to_start);
        pthread_cond_broadcast(&cv);
    }

    TRACE("All threads resumed...\n");
    pthread_mutex_unlock(&lock);
}
/* Park the calling thread (LockSupport.park()-style primitive).
   Blocks until the thread is unparked or the optional timeout expires.
   park_state is a three-valued counter: PARK_PERMIT -> PARK_RUNNING ->
   PARK_BLOCKED.  time == 0 waits indefinitely; otherwise absolute
   selects between an absolute deadline and a relative timeout (exact
   units are defined by getTimeoutAbsolute/getTimeoutRelative --
   confirm against those helpers). */
void threadPark(Thread *self, int absolute, long long time) {

    /* If we have a permit use it and return immediately.  No locking
       as we're the only one that can change the state at this point */
    if(self->park_state == PARK_PERMIT) {
        self->park_state = PARK_RUNNING;
        MBARRIER();
        return;
    }

    /* Spin until we can get the park lock.  This avoids having to
       disable suspension around pthread_mutex_lock */
    while(pthread_mutex_trylock(&self->park_lock))
        sched_yield();

    /* A thread may have given us a permit while we were waiting for
       the lock or we may be running.  Reduce the state by one
       (PERMIT -> RUNNING, RUNNING -> BLOCKED) and wait if we're now
       blocked */
    if(--self->park_state == PARK_BLOCKED) {
        /* Really must disable suspension now as we're going to sleep */
        disableSuspend(self);

        if(time) {
            struct timespec ts;

            if(absolute)
                getTimeoutAbsolute(&ts, time, 0);
            else
                getTimeoutRelative(&ts, 0, time);

            self->state = TIMED_WAITING;
            pthread_cond_timedwait(&self->park_cv, &self->park_lock, &ts);

            /* On Linux/i386 systems using LinuxThreads,
               pthread_cond_timedwait is implemented using
               sigjmp/longjmp.  This resets the fpu control word back
               to 64-bit precision.  The macro is empty for sane
               platforms. */
            FPU_HACK;
        } else {
            self->state = WAITING;
            pthread_cond_wait(&self->park_cv, &self->park_lock);
        }

        /* If we were unparked park_state will have been updated, but
           not if the wait timed out.  Only update if it's blocked, to
           avoid losing a possible permit */
        if(self->park_state == PARK_BLOCKED)
            self->park_state = PARK_RUNNING;

        self->state = RUNNING;
        enableSuspend(self);
    }

    pthread_mutex_unlock(&self->park_lock);
}
/* Stop-the-world: suspend every thread except self.  On return each
   other thread is either in the SUSPENDED state or has marked itself
   SUSP_BLOCKING (it will self-suspend when it re-enables suspension).
   Presumably used for GC and similar global operations -- confirm
   against callers. */
void suspendAllThreads(Thread *self) {
    Thread *thread;

    TRACE("Thread 0x%x id: %d is suspending all threads\n", self, self->id);
    pthread_mutex_lock(&lock);

    /* Pass 1: set each thread's suspend flag and signal those not
       currently blocking suspension */
    for(thread = &main_thread; thread != NULL; thread = thread->next) {
        if(thread == self)
            continue;

        thread->suspend = TRUE;

        /* The flag must be visible before the signal is sent */
        MBARRIER();

        if(!thread->blocking) {
            TRACE("Sending suspend signal to thread 0x%x id: %d\n", thread, thread->id);
            pthread_kill(thread->tid, SIGUSR1);
        }
    }

    /* Pass 2: wait until each thread has either entered the suspend
       loop or is blocking suspension itself */
    for(thread = &main_thread; thread != NULL; thread = thread->next) {
        if(thread == self)
            continue;

        while(thread->blocking != SUSP_BLOCKING && thread->state != SUSPENDED) {
            TRACE("Waiting for thread 0x%x id: %d to suspend\n", thread, thread->id);
            sched_yield();
        }
    }

    all_threads_suspended = TRUE;

    TRACE("All threads suspended...\n");
    pthread_mutex_unlock(&lock);
}
/* Unwrap an instruction whose handler was replaced by the inline
   rewriter wrapper, restoring its original operand and handler, then
   inline the containing block.  Racing threads may reach this for the
   same instruction; the rewrite lock plus the handler re-check below
   ensure only one thread performs the unwrap. */
void inlineBlockWrappedOpcode(MethodBlock *mb, Instruction *pc) {
    PrepareInfo *prepare_info = pc->operand.pntr;
    OpcodeInfo *info;
    int i;

    Thread *self = threadSelf();
    rewriteLock(self);

    /* Check the handler is still the rewriter wrapper -- another
       thread may have unwrapped it already */
    for(i = 0; i < HANDLERS; i++)
        if(pc->handler == handler_entry_points[i][OPC_INLINE_REWRITER])
            break;

    if(i == HANDLERS) {
        rewriteUnlock(self);
        return;
    }

    /* NOTE(review): presumably GOTO_START is a harmless dispatch
       handler that parks executing threads while the instruction is
       being rewritten -- confirm against the interpreter */
    pc->handler = handler_entry_points[0][GOTO_START];
    rewriteUnlock(self);

    /* Unwrap the original handler's operand */
    pc->operand = prepare_info->operand;

    /* The operand must be globally visible before the real handler is
       exposed, as executing threads read the handler first */
    MBARRIER();

    /* Unwrap the original handler */
    info = &prepare_info->block.opcodes[prepare_info->block.length-1];
    pc->handler = handler_entry_points[info->cache_depth][info->opcode];

    inlineBlock(mb, &prepare_info->block);
    sysFree(prepare_info);
}
/* Release the current thread's lock on obj.  The lock word is either
   "thin" (owner thread id + recursion count encoded inline) or "fat"
   (SHAPE_BIT set; remaining bits point at an inflated Monitor). */
void objectUnlock(Object *obj) {
    Thread *self = threadSelf();
    uintptr_t lockword = LOCKWORD_READ(&obj->lock);
    uintptr_t thin_locked = self->id<<TID_SHIFT;

    TRACE("Thread %p unlock on obj %p...\n", self, obj);

    if(lockword == thin_locked) {
        /* Thin-locked by us with zero recursion count: release by
           storing 0.

           This barrier is not needed for the thin-locking
           implementation; it's a requirement of the Java memory
           model. */
        JMM_UNLOCK_MBARRIER();
        LOCKWORD_WRITE(&obj->lock, 0);

        /* Required by thin-locking mechanism. */
        MBARRIER();

retry:
        /* The flat-lock-contention (FLC) bit indicates another thread
           is waiting to inflate this lock; wake it via the monitor */
        if(testFlcBit(obj)) {
            Monitor *mon = findMonitor(obj);

            if(!monitorTryLock(mon, self)) {
                threadYield(self);
                goto retry;
            }

            /* Re-check under the monitor lock -- the waiter may have
               proceeded, or the monitor been reused for another object */
            if(testFlcBit(obj) && (mon->obj == obj))
                monitorNotify(mon, self);

            monitorUnlock(mon, self);
        }
    } else {
        if((lockword & (TID_MASK|SHAPE_BIT)) == thin_locked)
            /* Recursively thin-locked by us: decrement the count */
            LOCKWORD_WRITE(&obj->lock, lockword - (1<<COUNT_SHIFT));
        else
            if((lockword & SHAPE_BIT) != 0) {
                /* Fat lock: the lock word holds the Monitor pointer */
                Monitor *mon = (Monitor*) (lockword & ~SHAPE_BIT);

                /* If nobody holds, is entering, or is waiting on the
                   monitor, deflate it back to an unlocked thin lock */
                if((mon->count == 0) && (LOCKWORD_READ(&mon->entering) == 0) &&
                                        (mon->in_wait == 0)) {
                    TRACE("Thread %p is deflating obj %p...\n", self, obj);

                    /* This barrier is not needed for the thin-locking
                       implementation; it's a requirement of the Java
                       memory model. */
                    JMM_UNLOCK_MBARRIER();

                    LOCKWORD_WRITE(&obj->lock, 0);
                    LOCKWORD_COMPARE_AND_SWAP(&mon->entering, 0, UN_USED);
                }

                monitorUnlock(mon, self);
            }
    }
}
/* Mark the thread as blocking suspension and stop SIGUSR1 delivery.
   A suspender seeing blocking set will not wait for this thread;
   stack_top records the extent of stack that remains valid while
   suspension is disabled. */
void disableSuspend0(Thread *thread, void *stack_top) {
    sigset_t suspend_sig;

    /* Build the signal mask up front -- purely local state */
    sigemptyset(&suspend_sig);
    sigaddset(&suspend_sig, SIGUSR1);

    /* Publish the stack extent before flagging the thread as
       blocking; the barrier orders both stores ahead of any
       subsequent suspender interaction */
    thread->stack_top = stack_top;
    thread->blocking = SUSP_BLOCKING;
    MBARRIER();

    /* Finally prevent the suspend signal from being delivered */
    pthread_sigmask(SIG_BLOCK, &suspend_sig, NULL);
}
/* Core of the suspension mechanism: spin in sigsuspend() until the
   suspend flag is cleared (or the thread starts blocking).  Runs
   either from the SIGUSR1 handler or when a thread self-suspends in
   enableSuspend().  Restores the thread's previous state on exit. */
static void suspendLoop(Thread *thread) {
    char old_state = thread->state;
    sigjmp_buf env;
    sigset_t mask;

    /* Dump the register state into env on the stack; &env then serves
       as the thread's stack top -- presumably so a conservative scan
       sees values held in registers (confirm against the GC) */
    sigsetjmp(env, FALSE);

    thread->stack_top = &env;
    thread->state = SUSPENDED;

    /* State must be visible before the suspender's wait loop reads it */
    MBARRIER();

    /* Wait with everything blocked except the resume signal (SIGUSR1)
       and SIGTERM */
    sigfillset(&mask);
    sigdelset(&mask, SIGUSR1);
    sigdelset(&mask, SIGTERM);

    while(thread->suspend && !thread->blocking)
        sigsuspend(&mask);

    thread->state = old_state;
    MBARRIER();
}
/* Turn len interpreter instructions starting at index start of info
   into a single native super-instruction: the handler bodies are
   concatenated, a dispatch is appended, and the first instruction is
   redirected at the (cached) combined code. */
void inlineSequence(MethodBlock *mb, CodeBlock *info, int start, int len) {
    Instruction *instructions = &info->start[start];
    OpcodeInfo *opcodes = &info->opcodes[start];
    CodeBlockHeader *cached, *scratch;
    int total, aligned, idx;
    char *dst;

    /* Total native length: block header + every handler body + the
       trailing dispatch code */
    total = goto_len + sizeof(CodeBlockHeader);
    for(idx = 0; idx < len; idx++)
        total += handler_sizes[opcodes[idx].cache_depth][opcodes[idx].opcode];
    aligned = ALIGN(total);

    /* Assemble the candidate in malloc'd scratch memory rather than
       code memory; if an identical block already exists in the cache
       the scratch copy is simply freed, which avoids fragmenting the
       code memory */
    scratch = sysMalloc(aligned);

    /* The sequence begins with its own length */
    scratch->len = aligned;
    dst = (char *)(scratch + 1);

    /* Concatenate the handler bodies */
    for(idx = 0; idx < len; idx++) {
        int body = handler_sizes[opcodes[idx].cache_depth][opcodes[idx].opcode];
        memcpy(dst, instructions[idx].handler, body);
        dst += body;
    }

    /* Append the dispatch onto the end of the super-instruction */
    memcpy(dst, goto_start, goto_len);
    dst += goto_len;

    /* Zero-fill the alignment padding */
    while(total < aligned) {
        *dst++ = 0;
        total++;
    }

    /* Share an identical, previously-inlined block where possible */
    cached = findCodeBlock(scratch);
    sysFree(scratch);

    if(cached != NULL) {
        /* Redirect the first instruction at the inlined sequence; the
           barrier publishes the code before the handler pointer */
        instructions[0].handler = cached + 1;
        MBARRIER();

        TRACE("InlineSequence %s start %p (%d) instruction len %d code len %d sequence %p\n", mb->name, instructions, start, len, total, instructions[0].handler);
    }
}
/* Resume a single thread previously stopped by suspendThread():
   clear its suspend flag, signal it out of the suspend loop, and
   spin until it has left the SUSPENDED state. */
void resumeThread(Thread *thread) {
    thread->suspend = FALSE;

    /* The cleared flag must be visible before the signal arrives */
    MBARRIER();

    if(!thread->blocking) {
        TRACE("Sending resume signal to thread 0x%x id: %d\n", thread, thread->id);
        pthread_kill(thread->tid, SIGUSR1);
    }

    while(thread->state == SUSPENDED) {
        TRACE("Waiting for thread 0x%x id: %d to resume\n", thread, thread->id);
        sched_yield();
    }
}
/* Suspend a single thread: set its suspend flag and signal it with
   SIGUSR1 so its handler enters the suspend loop, then wait until it
   is SUSPENDED or is blocking suspension itself. */
void suspendThread(Thread *thread) {
    thread->suspend = TRUE;

    /* The flag must be visible before the signal is sent */
    MBARRIER();

    if(!thread->blocking) {
        TRACE("Sending suspend signal to thread 0x%x id: %d\n", thread, thread->id);
        pthread_kill(thread->tid, SIGUSR1);
    }

    /* A SUSP_BLOCKING thread will suspend itself when it re-enables
       suspension, so don't wait for it here */
    while(thread->blocking != SUSP_BLOCKING && thread->state != SUSPENDED) {
        TRACE("Waiting for thread 0x%x id: %d to suspend\n", thread, thread->id);
        sched_yield();
    }
}
/* Interrupt a thread: set its interrupted flag, then wake it if it is
   waiting on a monitor, parked, or blocked in an interruptible system
   call. */
void threadInterrupt(Thread *thread) {
    Thread *self = threadSelf();
    Monitor *mon;

    /* MonitorWait sets wait_mon _before_ checking interrupted status.
       Therefore, if wait_mon is null, interrupted status will be
       noticed.  This guards against a race-condition leading to an
       interrupt being missed.  The memory barrier ensures correct
       ordering on SMP systems. */
    thread->interrupted = TRUE;
    MBARRIER();

    /* NOTE(review): wait_next != NULL presumably means the thread is
       actually queued on the monitor's wait list -- confirm against
       the monitor wait implementation */
    if((mon = thread->wait_mon) != NULL && thread->wait_next != NULL) {
        int locked;
        thread->interrupting = TRUE;

        /* The thread is waiting on a monitor, but it may not have
           entered the wait (in which case the signal will be lost).
           Loop until we can get ownership (i.e. the thread has
           released it on waiting) */
        while(!(locked = !pthread_mutex_trylock(&mon->lock)) &&
                        mon->owner == NULL)
            sched_yield();

        pthread_cond_signal(&thread->wait_cv);

        if(locked)
            pthread_mutex_unlock(&mon->lock);
    }

    /* Thread may still be parked */
    threadUnpark(thread);

    /* Handle the case where the thread is blocked in a system call.
       This will knock it out with an EINTR.  The suspend signal
       handler will just return (as in the user doing a kill), and do
       nothing otherwise. */

    /* Note, under Linuxthreads pthread_kill obtains a lock on the
       thread being signalled.  If another thread is suspending all
       threads, and the interrupting thread is earlier in the thread
       list than the thread being interrupted, it can be suspended
       holding the lock.  When the suspending thread tries to signal
       the interrupted thread it will deadlock.  To prevent this,
       disable suspension. */
    fastDisableSuspend(self);
    pthread_kill(thread->tid, SIGUSR1);
    fastEnableSuspend(self);
}
/* Re-enable suspension after disableSuspend().  If a suspend request
   arrived while suspension was disabled, the thread suspends itself
   here before unblocking SIGUSR1. */
void enableSuspend(Thread *thread) {
    sigset_t mask;

    thread->blocking = FALSE;

    /* Clearing blocking must be visible before suspend is re-read,
       so a pending suspend request cannot be missed */
    MBARRIER();

    if(thread->suspend) {
        TRACE("Thread 0x%x id: %d is self suspending\n", thread, thread->id);
        suspendLoop(thread);
        TRACE("Thread 0x%x id: %d resumed\n", thread, thread->id);
    }

    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);
    pthread_sigmask(SIG_UNBLOCK, &mask, NULL);
}