Example #1
/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call.  (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}
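A minimal sketch of the distinction the header comment draws between sleep(0,0) and wait(0,0); it assumes the surrounding Dalvik declarations, and the caller name is illustrative rather than a real call site:

/* Sketch: why sleep(0,0) is bumped to (0,1) before waitMonitor(). */
static void sleepVsWait(Thread* self, Monitor* mon)
{
    dvmThreadSleep(0, 0);                 /* adjusted to (0, 1): shortest timed sleep */

    lockMonitor(self, mon);
    waitMonitor(self, mon, 0, 0, true);   /* here 0/0 means: wait forever */
    unlockMonitor(self, mon);
}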
Example #2
void
DOMStorageDBThread::ThreadFunc()
{
  nsresult rv = InitDatabase();

  MonitorAutoLock lockMonitor(mMonitor);

  if (NS_FAILED(rv)) {
    mStatus = rv;
    mStopIOThread = true;
    return;
  }

  while (MOZ_LIKELY(!mStopIOThread || mPreloads.Length() || mPendingTasks.HasTasks())) {
    if (MOZ_UNLIKELY(TimeUntilFlush() == 0)) {
      // Flush time is up or flush has been forced, do it now.
      UnscheduleFlush();
      if (mPendingTasks.Prepare()) {
        {
          MonitorAutoUnlock unlockMonitor(mMonitor);
          rv = mPendingTasks.Execute(this);
        }

        if (!mPendingTasks.Finalize(rv)) {
          mStatus = rv;
          NS_WARNING("localStorage DB access broken");
        }
      }
      NotifyFlushCompletion();
    } else if (MOZ_LIKELY(mPreloads.Length())) {
      nsAutoPtr<DBOperation> op(mPreloads[0]);
      mPreloads.RemoveElementAt(0);
      {
        MonitorAutoUnlock unlockMonitor(mMonitor);
        op->PerformAndFinalize(this);
      }

      if (op->Type() == DBOperation::opPreloadUrgent) {
        SetDefaultPriority(); // urgent preload unscheduled
      }
    } else if (MOZ_UNLIKELY(!mStopIOThread)) {
      lockMonitor.Wait(TimeUntilFlush());
    }
  } // thread loop

  mStatus = ShutdownDatabase();
}
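The example leans on the RAII pattern: MonitorAutoLock acquires in its constructor and releases in its destructor, while MonitorAutoUnlock temporarily drops the lock for a scope so long-running database I/O runs unlocked. The sketch below rebuilds that idiom on std::mutex purely as an illustration; Mozilla's real classes also wrap a condition variable (Wait/Notify):

// Illustration only, not Mozilla's implementation.
#include <mutex>

class AutoLock {
public:
    explicit AutoLock(std::mutex& m) : mMutex(m) { mMutex.lock(); }
    ~AutoLock() { mMutex.unlock(); }
private:
    std::mutex& mMutex;
};

class AutoUnlock {
public:
    explicit AutoUnlock(std::mutex& m) : mMutex(m) { mMutex.unlock(); }
    ~AutoUnlock() { mMutex.lock(); }  // re-acquire when the scope closes
private:
    std::mutex& mMutex;
};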
Example #3
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state.  The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);
    /* Propagate the lock state. */
    thin = obj->lock;
    mon->lockCount = LW_LOCK_COUNT(thin);
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    thin |= (u4)mon | LW_SHAPE_FAT;
    /* Publish the updated lock word. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}
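inflateMonitor() only works because the lock word packs shape, hash state, owner, and recursion count into 32 bits. The masks below sketch the assumed layout; the field widths follow the stock Dalvik headers as best remembered and should be treated as an assumption:

/* Assumed Dalvik lock-word layout (low bit selects the shape):
 *
 *   thin:  [ count:13 ][ owner thread id:16 ][ hash:2 ][ shape:1 = 0 ]
 *   fat:   [ Monitor* with low 3 bits zero            ][ shape:1 = 1 ]
 */
#define LW_SHAPE(x)         ((x) & 0x1)
#define LW_HASH_STATE_SHIFT 1
#define LW_HASH_STATE_MASK  0x3
#define LW_LOCK_OWNER(x)    (((x) >> 3) & 0xffff)
#define LW_LOCK_COUNT(x)    (((x) >> 19) & 0x1fff)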
Example #4
void
DOMStorageDBThread::ThreadFunc()
{
  nsresult rv = InitDatabase();

  MonitorAutoLock lockMonitor(mThreadObserver->GetMonitor());

  if (NS_FAILED(rv)) {
    mStatus = rv;
    mStopIOThread = true;
    return;
  }

  // Create an nsIThread for the current PRThread, so we can observe runnables
  // dispatched to it.
  nsCOMPtr<nsIThread> thread = NS_GetCurrentThread();
  nsCOMPtr<nsIThreadInternal> threadInternal = do_QueryInterface(thread);
  MOZ_ASSERT(threadInternal); // Should always succeed.
  threadInternal->SetObserver(mThreadObserver);

  while (MOZ_LIKELY(!mStopIOThread || mPreloads.Length() ||
                    mPendingTasks.HasTasks() ||
                    mThreadObserver->HasPendingEvents())) {
    // Process xpcom events first.
    while (MOZ_UNLIKELY(mThreadObserver->HasPendingEvents())) {
      mThreadObserver->ClearPendingEvents();
      MonitorAutoUnlock unlock(mThreadObserver->GetMonitor());
      bool processedEvent;
      do {
        rv = thread->ProcessNextEvent(false, &processedEvent);
      } while (NS_SUCCEEDED(rv) && processedEvent);
    }

    if (MOZ_UNLIKELY(TimeUntilFlush() == 0)) {
      // Flush time is up or flush has been forced, do it now.
      UnscheduleFlush();
      if (mPendingTasks.Prepare()) {
        {
          MonitorAutoUnlock unlockMonitor(mThreadObserver->GetMonitor());
          rv = mPendingTasks.Execute(this);
        }

        if (!mPendingTasks.Finalize(rv)) {
          mStatus = rv;
          NS_WARNING("localStorage DB access broken");
        }
      }
      NotifyFlushCompletion();
    } else if (MOZ_LIKELY(mPreloads.Length())) {
      nsAutoPtr<DBOperation> op(mPreloads[0]);
      mPreloads.RemoveElementAt(0);
      {
        MonitorAutoUnlock unlockMonitor(mThreadObserver->GetMonitor());
        op->PerformAndFinalize(this);
      }

      if (op->Type() == DBOperation::opPreloadUrgent) {
        SetDefaultPriority(); // urgent preload unscheduled
      }
    } else if (MOZ_UNLIKELY(!mStopIOThread)) {
      lockMonitor.Wait(TimeUntilFlush());
    }
  } // thread loop

  mStatus = ShutdownDatabase();

  if (threadInternal) {
    threadInternal->SetObserver(nullptr);
  }
}
Example #5
/**
 * The unsigned indexbyte1 and indexbyte2 are used to construct an index into the runtime constant pool of the current class (§2.6),
 * where the value of the index is (indexbyte1 << 8) | indexbyte2. The runtime constant pool item at that index must be a symbolic reference to a method (§5.1),
 * which gives the name and descriptor (§4.3.3) of the method as well as a symbolic reference to the class in which the method is to be found.
 * The named method is resolved (§5.4.3.3). Finally, if the resolved method is protected (§4.6), and it is a member of a superclass of the current class,
 * and the method is not declared in the same runtime package (§5.3) as the current class,
 * then the class of objectref must be either the current class or a subclass of the current class.
 */
void JavaVM::pushNewFrame(StackFrame * stackFrame)
{
	Frame * frame = stackFrame->top();
	//read the program counter and fetch the operands of the current instruction
	//invoke instance method on object objectref; the method is identified by a constant-pool index built as ((indexbyte1 << 8) | indexbyte2)
	u1 * ptr = &frame->_code->code[frame->_pc + 1];
	u2 method_index = getu2(ptr);
	CPBase * pConstPool = frame->_clazz->_constantPool[method_index];

	//get method
	ASSERT(pConstPool->tag == CONSTANT_Methodref);
	CONSTANT_Methodref_info_resolve * method_ref = reinterpret_cast<CONSTANT_Methodref_info_resolve *>(pConstPool);

	/*get class name*/
	std::string class_name;
	class_name.assign((const char *)method_ref->_class_index->_name_index->bytes, method_ref->_class_index->_name_index->length);
	boost::shared_ptr<JavaClass> clazz(_classHeap->getClass(class_name));

	/*get method*/
	std::string method_name, method_desc;
	frame->_clazz->getStringFromConstPool(method_ref->_name_and_type_index->name_index, method_name);
	frame->_clazz->getStringFromConstPool(method_ref->_name_and_type_index->descriptor_index, method_desc);

	printf("invoke %s.%s:%s\n", class_name.c_str(), method_name.c_str(), method_desc.c_str());
	jClass virtual_clazz(clazz);
	u2 nIndex = clazz->getMethodID(method_name, method_desc, virtual_clazz);
	
	Frame * new_f = new Frame();
	new_f->_clazz = virtual_clazz;
	new_f->_method = virtual_clazz->_methods[nIndex];
	if (!(virtual_clazz->_methods[nIndex]->getAccessFlags() & ACC_NATIVE)) // non-native method: a Code attribute exists
	{
		Code_attribute * code = virtual_clazz->getMethodCodeAttribute(nIndex);
		new_f->setCode(code);
	}
	else
	{
		Code_attribute * code = new Code_attribute();
		code->max_locals = 1;
		code->max_stack = 0;
		code->code = NULL;
		new_f->setCode(code);
	}

	/*
	 * The Java virtual machine uses local variables to pass parameters on method invocation
	 * http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-2.html#jvms-2.6.1
	 */
	std::string params = countMethodStack(method_desc);
	for (std::size_t i = params.length(), j = i; ; i--, j--) // i is unsigned, so "i >= 0" always held; the loop exits via the break below
	{
		/** 
		  * The objectref and the argument values are consecutively made the values of local variables of the new frame,
		  * with objectref in local variable 0,
		  * arg1 in local variable 1 (or, if arg1 is of type long or double, in local variables 1 and 2), and so on
		  */
		new_f->_localVariable[i] = frame->_operandStack.top();
		frame->_operandStack.pop();
		
		//TODO: improve this
		if (i == 0)
			break;

		/* if it is long or double I need to add 1 in local variable */
		if (params.size() > 0 && (params[j - 1] == 'J' || params[j - 1] == 'D'))
			i--;
	}

	if (new_f->_method->getAccessFlags() & ACC_SYNCHRONIZED)
	{
		if (!lockMonitor(new_f->_localVariable[0]._var._objectref))
		{
			new_f->_lock = true;
			new_f->_waitingObj = new_f->_localVariable[0]._var._objectref;
			this->wait(new_f->_waitingObj, boost::bind(&Frame::unlock, new_f));
		}
	}

	//A method with ACC_SYNCHRONIZED performs an implicit monitorenter:
	//new_f->_localVariable[0] holds the objectref, and that is the object locked above.
	stackFrame->push(new_f);
}
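The argument-copy loop above relies on countMethodStack() returning one character per parameter so that 'J' (long) and 'D' (double) can be spotted as two-slot values. The author's helper is not shown; the following is a hypothetical sketch of what it plausibly does:

// Hypothetical sketch, not the author's implementation: walk the "(...)"
// part of a method descriptor and emit one character per parameter.
#include <string>

std::string countMethodStack(const std::string& desc)
{
    std::string out;
    std::size_t i = desc.find('(') + 1;
    while (i < desc.size() && desc[i] != ')') {
        bool isArray = false;
        char c = desc[i];
        while (c == '[') {          // array dimensions prefix the element type
            isArray = true;
            c = desc[++i];
        }
        if (c == 'L')               // object type: skip to the closing ';'
            i = desc.find(';', i);
        // long and double occupy two local-variable slots; everything
        // else (including any array, which is a reference) occupies one
        out.push_back(!isArray && (c == 'J' || c == 'D') ? c : 'I');
        ++i;
    }
    return out;
}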
Example #6
/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    ThreadStatus oldStatus;
    struct timespec tm;
    long sleepDelayNs;
    long minSleepDelayNs = 1000000;  /* 1 millisecond */
    long maxSleepDelayNs = 1000000000;  /* 1 second */
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;
retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock.  The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock.  Increment the
             * value of the recursion count field.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
            if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
                /*
                 * The reacquisition limit has been reached.  Inflate
                 * the lock so the next acquire will not overflow the
                 * recursion count field.
                 */
                inflateMonitor(self, obj);
            }
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned.  Install the thread id of the
             * calling thread into the owner field.  This is the
             * common case.  In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin,
                    (int32_t*)thinp) != 0) {
                /*
                 * The acquire failed.  Try again.
                 */
                goto retry;
            }
        } else {
            ALOGV("(%d) spin on lock %p: %#x (%#x) %#x",
                 threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread.  Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelayNs = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word.  Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released.  Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (android_atomic_acquire_cas(thin, newThin,
                                (int32_t *)thinp) == 0) {
                            /*
                             * The acquire succeeded.  Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released.  Yield so
                         * the owning thread can run.
                         */
                        if (sleepDelayNs == 0) {
                            sched_yield();
                            sleepDelayNs = minSleepDelayNs;
                        } else {
                            tm.tv_sec = 0;
                            tm.tv_nsec = sleepDelayNs;
                            nanosleep(&tm, NULL);
                            /*
                             * Prepare the next delay value.  Double it,
                             * wrapping back to the minimum so we don't
                             * end up polling only once a second forever.
                             */
                            if (sleepDelayNs < maxSleepDelayNs / 2) {
                                sleepDelayNs *= 2;
                            } else {
                                sleepDelayNs = minSleepDelayNs;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    ALOGV("(%d) lock %p surprise-fattened",
                             threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            ALOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
                 threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock.  Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.
             */
            inflateMonitor(self, obj);
            ALOGV("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
}
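The comment in the unowned case mentions that the JIT tries the acquire before calling into the VM. A hedged sketch of that uncontended fast path, mirroring the names used above; this is illustrative and not the actual JIT-emitted sequence:

/* Sketch: the uncontended thin-lock acquire is a single compare-and-swap
 * from "thin, unowned" to "thin, owned by the calling thread". */
static inline bool tryThinLockFastPath(Thread* self, Object* obj)
{
    u4 thin = obj->lock;
    if (LW_SHAPE(thin) != LW_SHAPE_THIN || LW_LOCK_OWNER(thin) != 0)
        return false;   /* fat, or already owned: take the slow path */
    u4 newThin = thin | (self->threadId << LW_LOCK_OWNER_SHIFT);
    return android_atomic_acquire_cas(thin, newThin,
                                      (int32_t*)&obj->lock) == 0;
}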
Example #7
/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowIllegalArgumentException("timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor.  Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;

    const Method* savedMethod = mon->ownerMethod;
    u4 savedPc = mon->ownerPc;
    mon->ownerMethod = NULL;
    mon->ownerPc = 0;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    dvmLockMutex(&self->waitMutex);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        dvmUnlockMutex(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.
     */
    dvmUnlockMutex(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    self->interrupted = false;
    self->waitMonitor = NULL;

    dvmUnlockMutex(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor. Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    mon->ownerMethod = savedMethod;
    mon->ownerPc = savedPc;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow) {
            dvmThrowInterruptedException(NULL);
        }
    }
}
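The header comment's point about spurious wakeups is why callers of Object.wait() are told to wait in a loop. The same discipline applies one layer down at the pthread level; a minimal self-contained sketch of the guarded-wait idiom, with illustrative names:

/* Sketch: re-check the predicate after every wakeup, since
 * pthread_cond_wait() may return spuriously. */
#include <pthread.h>

static pthread_mutex_t gMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gCond  = PTHREAD_COND_INITIALIZER;
static bool gReady = false;

static void waitUntilReady(void)
{
    pthread_mutex_lock(&gMutex);
    while (!gReady)                       /* loop, never a bare if */
        pthread_cond_wait(&gCond, &gMutex);
    pthread_mutex_unlock(&gMutex);
}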