void TileGrid::platformCALayerPaintContents(PlatformCALayer* platformCALayer, GraphicsContext& context, const FloatRect&)
{
#if PLATFORM(IOS)
    if (pthread_main_np())
        WebThreadLock();
#endif
    {
        GraphicsContextStateSaver stateSaver(context);

        FloatPoint3D layerOrigin = platformCALayer->position();
        context.translate(-layerOrigin.x(), -layerOrigin.y());
        context.scale(FloatSize(m_scale, m_scale));

        PlatformCALayer::RepaintRectList dirtyRects = PlatformCALayer::collectRectsToPaint(context.platformContext(), platformCALayer);
        PlatformCALayer::drawLayerContents(context.platformContext(), &m_controller.rootLayer(), dirtyRects);
    }

    int repaintCount = platformCALayerIncrementRepaintCount(platformCALayer);
    if (m_controller.rootLayer().owner()->platformCALayerShowRepaintCounter(0))
        PlatformCALayer::drawRepaintIndicator(context.platformContext(), platformCALayer, repaintCount, cachedCGColor(m_controller.tileDebugBorderColor()));

    if (m_controller.scrollingPerformanceLoggingEnabled()) {
        FloatRect visiblePart(platformCALayer->position().x(), platformCALayer->position().y(), platformCALayer->bounds().size().width(), platformCALayer->bounds().size().height());
        visiblePart.intersect(m_controller.visibleRect());

        if (repaintCount == 1 && !visiblePart.isEmpty())
            WTFLogAlways("SCROLLING: Filled visible fresh tile. Time: %f Unfilled Pixels: %u\n", WTF::monotonicallyIncreasingTime(), blankPixelCount());
    }
}
void TimerBase::start(double nextFireInterval, double repeatInterval)
{
    ASSERT(m_thread == currentThread() || (isMainThread() || pthread_main_np()) && WebCoreWebThreadIsLockedOrDisabled());

    m_repeatInterval = repeatInterval;
    setNextFireTime(currentTime() + nextFireInterval);
}
void TimerBase::setNextFireTime(double newTime)
{
    ASSERT(m_thread == currentThread() || (isMainThread() || pthread_main_np()) && WebCoreWebThreadIsLockedOrDisabled());

    // Keep heap valid while changing the next-fire time.
    double oldTime = m_nextFireTime;
    if (oldTime != newTime) {
        m_nextFireTime = newTime;
        static unsigned currentHeapInsertionOrder;
        m_heapInsertionOrder = currentHeapInsertionOrder++;

        bool wasFirstTimerInHeap = m_heapIndex == 0;

        if (oldTime == 0)
            heapInsert();
        else if (newTime == 0)
            heapDelete();
        else if (newTime < oldTime)
            heapDecreaseKey();
        else
            heapIncreaseKey();

        bool isFirstTimerInHeap = m_heapIndex == 0;

        if (wasFirstTimerInHeap || isFirstTimerInHeap)
            threadGlobalData().threadTimers().updateSharedTimer();
    }

    checkConsistency();
}
void StorageNamespaceImpl::sync()
{
    ASSERT(isMainThread() || pthread_main_np());

    StorageAreaMap::iterator end = m_storageAreaMap.end();
    for (StorageAreaMap::iterator it = m_storageAreaMap.begin(); it != end; ++it)
        it->second->sync();
}
void
Tk_MacOSXSetupTkNotifier()
{
    ThreadSpecificData *tsdPtr = Tcl_GetThreadData(&dataKey, sizeof(ThreadSpecificData));

    if (!tsdPtr->initialized) {
        /* HACK ALERT: There is a bug in Jaguar where when it goes to make
         * the event queue for the Main Event Loop, it stores the Current
         * event loop rather than the Main Event Loop in the Queue structure.
         * So we have to make sure that the Main Event Queue gets set up on
         * the main thread. Calling GetMainEventQueue will force this to
         * happen. */
        GetMainEventQueue();

        tsdPtr->initialized = 1;

        /* Install Carbon events event source in main event loop thread. */
        if (GetCurrentEventLoop() == GetMainEventLoop()) {
            if (!pthread_main_np()) {
                /*
                 * Panic if the Carbon main event loop thread (i.e. the
                 * thread where HIToolbox was first loaded) is not the
                 * main application thread, as Carbon does not support
                 * this properly.
                 */
                Tcl_Panic("Tk_MacOSXSetupTkNotifier: %s",
                        "first [load] of TkAqua has to occur in the main thread!");
            }
            Tcl_CreateEventSource(CarbonEventsSetupProc, CarbonEventsCheckProc,
                    GetMainEventQueue());
            TkCreateExitHandler(TkMacOSXNotifyExitHandler, NULL);
        }
    }
}
void StorageNamespaceImpl::clearOriginForDeletion(SecurityOrigin* origin)
{
    ASSERT(isMainThread() || pthread_main_np());

    RefPtr<StorageAreaImpl> storageArea = m_storageAreaMap.get(origin);
    if (storageArea)
        storageArea->clearForOriginDeletion();
}
void StorageNamespaceImpl::clearAllOriginsForDeletion()
{
    ASSERT(isMainThread() || pthread_main_np());

    StorageAreaMap::iterator end = m_storageAreaMap.end();
    for (StorageAreaMap::iterator it = m_storageAreaMap.begin(); it != end; ++it)
        it->second->clearForOriginDeletion();
}
bool isMainThread()
{
#if PLATFORM(DARWIN)
    return pthread_main_np();
#else
    return currentThread() == mainThreadIdentifier;
#endif
}
void TimerBase::stop()
{
    ASSERT(m_thread == currentThread() || (isMainThread() || pthread_main_np()) && WebCoreWebThreadIsLockedOrDisabled());

    m_repeatInterval = 0;
    setNextFireTime(0);

    ASSERT(m_nextFireTime == 0);
    ASSERT(m_repeatInterval == 0);
    ASSERT(!inHeap());
}
StorageNamespaceImpl::~StorageNamespaceImpl()
{
    ASSERT(isMainThread() || pthread_main_np());

    if (m_storageType == LocalStorage) {
        ASSERT(localStorageNamespaceMap().get(m_path) == this);
        localStorageNamespaceMap().remove(m_path);
    }

    if (!m_isShutdown)
        close();
}
PassRefPtr<StorageNamespace> StorageNamespaceImpl::copy()
{
    ASSERT(isMainThread() || pthread_main_np());
    ASSERT(!m_isShutdown);
    ASSERT(m_storageType == SessionStorage);

    RefPtr<StorageNamespaceImpl> newNamespace = adoptRef(new StorageNamespaceImpl(m_storageType, m_path, m_quota));

    StorageAreaMap::iterator end = m_storageAreaMap.end();
    for (StorageAreaMap::iterator i = m_storageAreaMap.begin(); i != end; ++i)
        newNamespace->m_storageAreaMap.set(i->first, i->second->copy());
    return newNamespace.release();
}
PassRefPtr<StorageArea> StorageNamespaceImpl::storageArea(PassRefPtr<SecurityOrigin> prpOrigin)
{
    ASSERT(isMainThread() || pthread_main_np());
    ASSERT(!m_isShutdown);

    RefPtr<SecurityOrigin> origin = prpOrigin;
    RefPtr<StorageAreaImpl> storageArea;
    if ((storageArea = m_storageAreaMap.get(origin)))
        return storageArea.release();

    storageArea = StorageAreaImpl::create(m_storageType, origin, m_syncManager, m_quota);
    m_storageAreaMap.set(origin.release(), storageArea);
    return storageArea.release();
}
void
dispatch_main(void)
{
#if HAVE_PTHREAD_MAIN_NP
    if (pthread_main_np()) {
#endif
        _dispatch_program_is_probably_callback_driven = true;
        pthread_exit(NULL);
        DISPATCH_CRASH("pthread_exit() returned");
#if HAVE_PTHREAD_MAIN_NP
    }
    DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread");
#endif
}
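/*
 * Minimal usage sketch (not part of libdispatch itself; assumes a Blocks-capable
 * clang on Darwin): dispatch_main() is the last call made from main(), on the main
 * thread, after work has been queued onto the main queue asynchronously.
 */
#include <dispatch/dispatch.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    dispatch_async(dispatch_get_main_queue(), ^{
        printf("running on the main queue\n");
        exit(0);        /* dispatch_main() never returns, so terminate from a block */
    });
    dispatch_main();    /* parks the main thread; traps if called off the main thread */
}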
void StackBounds::initialize()
{
    pthread_t thread = pthread_self();
    m_origin = pthread_get_stackaddr_np(thread);
    rlim_t size = 0;
    if (pthread_main_np()) {
        // FIXME: <rdar://problem/13741204>
        // pthread_get_size lies to us when we're the main thread, use get_rlimit instead
        rlimit limit;
        getrlimit(RLIMIT_STACK, &limit);
        size = limit.rlim_cur;
    } else
        size = pthread_get_stacksize_np(thread);
    m_bound = static_cast<char*>(m_origin) - size;
}
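/*
 * Context sketch in plain C (illustrative only; currentThreadStackBounds is a
 * made-up helper, not WebKit's): the same Darwin technique of special-casing the
 * main thread -- getrlimit(RLIMIT_STACK) for the main stack size, because
 * pthread_get_stacksize_np() underreports it there, and the pthread API otherwise.
 */
#include <pthread.h>
#include <sys/resource.h>

static void currentThreadStackBounds(void** origin, void** bound)
{
    pthread_t thread = pthread_self();
    size_t size;

    *origin = pthread_get_stackaddr_np(thread);    /* highest address of the stack */

    if (pthread_main_np()) {
        struct rlimit limit;
        getrlimit(RLIMIT_STACK, &limit);           /* the soft limit governs the main stack */
        size = (size_t)limit.rlim_cur;
    } else {
        size = pthread_get_stacksize_np(thread);
    }

    *bound = (char*)*origin - size;                /* the stack grows downward */
}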
WTFThreadData::WTFThreadData()
    : m_atomicStringTable(0)
    , m_atomicStringTableDestructor(0)
#if USE(JSC)
    , m_stackBounds(StackBounds::currentThreadStackBounds())
#endif
{
#if USE(JSC)
    static TI::IdentifierTable* sharedIdentifierTable = new TI::IdentifierTable();
    if (pthread_main_np() || isWebThread())
        m_defaultIdentifierTable = sharedIdentifierTable;
    else
        m_defaultIdentifierTable = new TI::IdentifierTable();

    m_currentIdentifierTable = m_defaultIdentifierTable;
#endif
}
ThreadGlobalData::ThreadGlobalData()
    : m_eventNames(new EventNames)
    , m_threadTimers(new ThreadTimers)
    , m_xmlTypeRegExp(new XMLMIMETypeRegExp)
#ifndef NDEBUG
    , m_isMainThread(pthread_main_np() || isMainThread())
#endif
#if USE(ICU_UNICODE)
    , m_cachedConverterICU(new ICUConverterWrapper)
#endif
{
    // This constructor will have been called on the main thread before being called on
    // any other thread, and is only called once per thread - this makes this a convenient
    // point to call methods that internally perform a one-time initialization that is not
    // threadsafe.
    wtfThreadData();
    StringImpl::empty();
}
void AtomicString::init()
{
    static bool initialized;
    if (!initialized) {
        // On iPhone WebKit, isMainThread() tests for the Web Thread, so use pthread_main_np() instead.
        ASSERT(pthread_main_np());

        // Use placement new to initialize the globals.
        new ((void*)&nullAtom) AtomicString;
        new ((void*)&emptyAtom) AtomicString("");
        new ((void*)&textAtom) AtomicString("#text");
        new ((void*)&commentAtom) AtomicString("#comment");
        new ((void*)&starAtom) AtomicString("*");
        new ((void*)&xmlAtom) AtomicString("xml");
        new ((void*)&xmlnsAtom) AtomicString("xmlns");

        initialized = true;
    }
}
void
mono_threads_core_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
    *staddr = (guint8*)pthread_get_stackaddr_np (pthread_self());
    *stsize = pthread_get_stacksize_np (pthread_self());

#ifdef TARGET_OSX
    /*
     * Mavericks reports stack sizes as 512kb:
     * http://permalink.gmane.org/gmane.comp.java.openjdk.hotspot.devel/11590
     * https://bugs.openjdk.java.net/browse/JDK-8020753
     */
    if (pthread_main_np () && *stsize == 512 * 1024)
        *stsize = 2048 * mono_pagesize ();
#endif

    /* staddr points to the start of the stack, not the end */
    *staddr -= *stsize;
}
kern_return_t
bootstrap_look_up_per_user(mach_port_t bp, name_t service_name, uid_t target_user, mach_port_t *sp)
{
    struct stat sb;
    kern_return_t kr;
    mach_port_t puc;

    if (pthread_main_np() && (stat("/AppleInternal", &sb) != -1)) {
        _vproc_log(LOG_WARNING, "Please review the comments in 4890134.");
    }

    if ((kr = vproc_mig_lookup_per_user_context(bp, target_user, &puc)) != 0) {
        return kr;
    }

    kr = vproc_mig_look_up2(puc, service_name, sp, 0, 0);
    mach_port_deallocate(mach_task_self(), puc);

    return kr;
}
mtctxres_t *
___mtctxres(void) {
#ifdef DO_PTHREADS
    mtctxres_t *mt;

#ifdef _LIBC
#ifndef __rtems__
    if (pthread_main_np() != 0)
        return (&sharedctx);
#endif /* __rtems__ */
#endif

    /*
     * This if clause should only be executed if we are linking
     * statically. When linked dynamically _mtctxres_init() should
     * be called at binding time due the #pragma above.
     */
    if (!mt_key_initialized) {
        static pthread_mutex_t keylock = PTHREAD_MUTEX_INITIALIZER;
        if (pthread_mutex_lock(&keylock) == 0) {
            _mtctxres_init();
            (void) pthread_mutex_unlock(&keylock);
        }
    }

    /*
     * If we have already been called in this thread return the existing
     * context. Otherwise create a new context and return it. If
     * that fails return a global context.
     */
    if (mt_key_initialized) {
        if (((mt = pthread_getspecific(key)) != 0) ||
            (__res_init_ctx() == 0 &&
             (mt = pthread_getspecific(key)) != 0)) {
            return (mt);
        }
    }
#endif
    return (&sharedctx);
}
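/*
 * Hedged illustration of the thread-specific-data pattern used above (hypothetical
 * names, not the resolver's own; pthread_main_np()'s header varies by platform):
 * the main thread gets one shared static context, every other thread lazily
 * allocates a private context under a pthread key, and any failure falls back to
 * the shared context.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t ctx_key;
static pthread_once_t ctx_once = PTHREAD_ONCE_INIT;
static int shared_ctx;                        /* stands in for sharedctx */

static void ctx_key_init(void) { pthread_key_create(&ctx_key, free); }

static int *get_ctx(void)
{
    if (pthread_main_np())                    /* main thread: always the shared context */
        return &shared_ctx;

    pthread_once(&ctx_once, ctx_key_init);
    int *ctx = pthread_getspecific(ctx_key);
    if (ctx == NULL) {                        /* first use on this thread: allocate */
        ctx = calloc(1, sizeof *ctx);
        if (ctx == NULL)
            return &shared_ctx;
        if (pthread_setspecific(ctx_key, ctx) != 0) {
            free(ctx);
            return &shared_ctx;
        }
    }
    return ctx;
}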
void StorageNamespaceImpl::close()
{
    ASSERT(isMainThread() || pthread_main_np());

    if (m_isShutdown)
        return;

    // If we're session storage, we shouldn't need to do any work here.
    if (m_storageType == SessionStorage) {
        ASSERT(!m_syncManager);
        return;
    }

    StorageAreaMap::iterator end = m_storageAreaMap.end();
    for (StorageAreaMap::iterator it = m_storageAreaMap.begin(); it != end; ++it)
        it->second->close();

    if (m_syncManager)
        m_syncManager->close();

    m_isShutdown = true;
}
static void logInternalv(FILE * file,
    const char * user, const char * filename, unsigned int line,
    int dumpStack, const char * format, va_list argp)
{
    if (!MCLogEnabled)
        return;

    // Keep only the basename of the file.
    while (1) {
        const char * p = filename;
        p = strchr(filename, '/');
        if (p == NULL) {
            break;
        }
        filename = p + 1;
    }

    struct timeval tv;
    struct tm tm_value;
    pthread_t thread_id = pthread_self();

#if defined(ANDROID) || defined(__ANDROID__)
    __android_log_vprint(ANDROID_LOG_INFO, filename, format, argp);
#else
    gettimeofday(&tv, NULL);
    time_t timevalue_sec = tv.tv_sec;
    localtime_r(&timevalue_sec, &tm_value);
    fprintf(file, "%04u-%02u-%02u %02u:%02u:%02u.%03u ",
        tm_value.tm_year + 1900, tm_value.tm_mon + 1, tm_value.tm_mday,
        tm_value.tm_hour, tm_value.tm_min, tm_value.tm_sec, (int) (tv.tv_usec / 1000));

#ifdef __MACH__
    if (pthread_main_np()) {
#else
    if (0) {
#endif
        fprintf(file, "[%i:main] %s:%i: ", sPid, filename, line);
    }
    else {
        unsigned long threadValue;
#ifdef _MACH_PORT_T
        threadValue = pthread_mach_thread_np(thread_id);
#elif _MSC_VER
        threadValue = (unsigned long) thread_id.p;
#else
        threadValue = (unsigned long) thread_id;
#endif
        fprintf(file, "[%i:%lx] %s:%i: ", sPid, threadValue, filename, line);
    }
    vfprintf(file, format, argp);
    fprintf(file, "\n");

    if (dumpStack) {
#if __APPLE__
        void * frame[128];
        int frameCount;
        int i;

        fprintf(file, " ");
        frameCount = backtrace(frame, 128);
        for(i = 0 ; i < frameCount ; i ++) {
            fprintf(file, " %p", frame[i]);
        }
        fprintf(file, "\n");
#endif
        // TODO: implementations needed for other platforms.
    }
#endif
}
Boolean _CFIsMainThread(void) {
    return pthread_main_np() == 1;
}
static OSStatus BindReplyMachPortToThread(mach_port_t *replyPortPtr)
    // Get a reply port for this thread, remembering that we've done this
    // in per-thread storage.
    //
    // On success, *replyPortPtr is the port to use for this thread's reply
    // port.  It will be MACH_PORT_NULL if you call it from the main thread.
{
    OSStatus err;

    assert( replyPortPtr != NULL);
    assert(*replyPortPtr == MACH_PORT_NULL);

    // Initialise ourselves the first time that we're called.

    err = (OSStatus) pthread_once(&sInited, InitRoutine);

    // If something went wrong, return the latched error.

    if ( (err == noErr) && (sPerThreadStorageKeyInitErrNum != noErr) ) {
        err = sPerThreadStorageKeyInitErrNum;
    }

    // Now do the real work.

    if (err == noErr) {
        if ( pthread_main_np() ) {
            // This is the main thread, so do nothing; leave *replyPortPtr set
            // to MACH_PORT_NULL.
            assert(*replyPortPtr == MACH_PORT_NULL);
        } else {
            PerThreadStorage * storage;

            // Get the per-thread storage for this thread.

            storage = (PerThreadStorage *) pthread_getspecific(sPerThreadStorageKey);
            if (storage == NULL) {

                // The per-thread storage hasn't been allocated yet for this specific
                // thread.  Let's go allocate it and attach it to this thread.

                err = AllocatePortFromPool(&storage);
                if (err == noErr) {
                    err = (OSStatus) pthread_setspecific(sPerThreadStorageKey, (void *) storage);
                    if (err != noErr) {
                        ReturnPortToPool(storage);
                        storage = NULL;
                    }
                }
            }
            assert( (err == noErr) == (storage != NULL) );

            // If all went well, copy the port out to our client.

            if (err == noErr) {
                assert(storage->magic == kPerThreadStorageMagic);
                assert(storage->port != MACH_PORT_NULL);
                *replyPortPtr = storage->port;
            }
        }
    }

    // no error + MACH_PORT_NULL is a valid response if we're on the main
    // thread.
    //
    // assert( (err == noErr) == (*replyPortPtr != MACH_PORT_NULL) );
    assert( (*replyPortPtr == MACH_PORT_NULL) || (err == noErr) );

    return err;
}
void
mono_threads_core_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
#if defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
    /* Mac OS X */
    *staddr = (guint8*)pthread_get_stackaddr_np (pthread_self());
    *stsize = pthread_get_stacksize_np (pthread_self());

#ifdef TARGET_OSX
    /*
     * Mavericks reports stack sizes as 512kb:
     * http://permalink.gmane.org/gmane.comp.java.openjdk.hotspot.devel/11590
     * https://bugs.openjdk.java.net/browse/JDK-8020753
     */
    if (pthread_main_np () && *stsize == 512 * 1024)
        *stsize = 2048 * mono_pagesize ();
#endif

    /* staddr points to the start of the stack, not the end */
    *staddr -= *stsize;

    /* When running under emacs, sometimes staddr is not aligned to a page size */
    *staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize() - 1));
    return;

#elif (defined(HAVE_PTHREAD_GETATTR_NP) || defined(HAVE_PTHREAD_ATTR_GET_NP)) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
    /* Linux, BSD */
    pthread_attr_t attr;
    guint8 *current = (guint8*)&attr;

    *staddr = NULL;
    *stsize = (size_t)-1;

    pthread_attr_init (&attr);

#if defined(HAVE_PTHREAD_GETATTR_NP)
    /* Linux */
    pthread_getattr_np (pthread_self(), &attr);
#elif defined(HAVE_PTHREAD_ATTR_GET_NP)
    /* BSD */
    pthread_attr_get_np (pthread_self(), &attr);
#else
#error Cannot determine which API is needed to retrieve pthread attributes.
#endif

    pthread_attr_getstack (&attr, (void**)staddr, stsize);
    pthread_attr_destroy (&attr);

    if (*staddr)
        g_assert ((current > *staddr) && (current < *staddr + *stsize));

    /* When running under emacs, sometimes staddr is not aligned to a page size */
    *staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
    return;

#elif defined(__OpenBSD__)
    /* OpenBSD */
    /* TODO: Determine if this code is actually still needed. It may already
       be covered by the case above. */
    pthread_attr_t attr;
    guint8 *current = (guint8*)&attr;

    *staddr = NULL;
    *stsize = (size_t)-1;

    pthread_attr_init (&attr);

    stack_t ss;
    int rslt;

    rslt = pthread_stackseg_np(pthread_self(), &ss);
    g_assert (rslt == 0);

    *staddr = (guint8*)((size_t)ss.ss_sp - ss.ss_size);
    *stsize = ss.ss_size;

    pthread_attr_destroy (&attr);

    if (*staddr)
        g_assert ((current > *staddr) && (current < *staddr + *stsize));

    /* When running under emacs, sometimes staddr is not aligned to a page size */
    *staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
    return;

#elif defined(sun) || defined(__native_client__)
    /* Solaris/Illumos, NaCl */
    pthread_attr_t attr;
    pthread_attr_init (&attr);
    /* stsize is already a pointer to the caller's size_t, so pass it directly */
    pthread_attr_getstacksize (&attr, stsize);
    pthread_attr_destroy (&attr);
    *staddr = NULL;
    return;

#else
    /* FIXME: It'd be better to use the 'error' preprocessor macro here so we know
       at compile-time if the target platform isn't supported. */
#warning "Unable to determine how to retrieve a thread's stack-bounds for this platform in 'mono_thread_get_stack_bounds()'."
    *staddr = NULL;
    *stsize = 0;
    return;
#endif
}
static int dill_ismain() {
    return pthread_main_np();
}
    if (!allowsCrossOriginMethod(method, ignoredExplanation))
        return false;

    if (!allowsCrossOriginHeaders(requestHeaders, ignoredExplanation))
        return false;

    return true;
}

bool isMainThread() // ricardo: adding this method because it was being taken from NSThread
{
    return true;
}

CrossOriginPreflightResultCache& CrossOriginPreflightResultCache::shared()
{
    DEFINE_STATIC_LOCAL(CrossOriginPreflightResultCache, cache, ());
    ASSERT(isMainThread() || pthread_main_np());
    return cache;
}

void CrossOriginPreflightResultCache::appendEntry(const String& origin, const KURL& url, PassOwnPtr<CrossOriginPreflightResultCacheItem> preflightResult)
{
    ASSERT(isMainThread() || pthread_main_np());
    CrossOriginPreflightResultCacheItem* resultPtr = preflightResult.leakPtr();
    pair<CrossOriginPreflightResultHashMap::iterator, bool> addResult = m_preflightHashMap.add(make_pair(origin, url), resultPtr);
    if (!addResult.second) {
        // FIXME: We need to delete the old value before replacing with the new one.
        addResult.first->second = resultPtr;
    }
}

bool CrossOriginPreflightResultCache::canSkipPreflight(const String& origin, const KURL& url, bool includeCredentials, const String& method, const HTTPHeaderMap& requestHeaders)
CF_CROSS_PLATFORM_EXPORT Boolean _CFIsMainThread(void) {
    return pthread_main_np() == 1;
}
size_t
TclpThreadGetStackSize(void)
{
    size_t stackSize = 0;
#if defined(HAVE_PTHREAD_ATTR_SETSTACKSIZE) && defined(TclpPthreadGetAttrs)
    pthread_attr_t threadAttr;  /* This will hold the thread attributes for
                                 * the current thread. */
#ifdef __GLIBC__
    /*
     * Fix for [Bug 1815573]
     *
     * DESCRIPTION:
     * On linux TclpPthreadGetAttrs (which is pthread_attr_get_np) may return
     * bogus values on the initial thread.
     *
     * ASSUMPTIONS:
     * There seems to be no api to determine if we are on the initial
     * thread. The simple scheme implemented here assumes:
     *   1. The first Tcl interp to be created lives in the initial thread. If
     *      this assumption is not true, the fix is to call
     *      TclpThreadGetStackSize from the initial thread previous to
     *      creating any Tcl interpreter. In this case, especially if another
     *      Tcl interpreter may be created in the initial thread, it might be
     *      better to enable the second branch in the #if below
     *   2. There will be no races in creating the first Tcl interp - ie, the
     *      second Tcl interp will be created only after the first call to
     *      Tcl_CreateInterp returns.
     *
     * These assumptions are satisfied by tclsh. Embedders on linux may want
     * to check their validity, and possibly adapt the code on failing to meet
     * them.
     */

    static int initialized = 0;

    if (!initialized) {
        initialized = 1;
        return 0;
    } else {
#else
    {
#endif
        if (pthread_attr_init(&threadAttr) != 0) {
            return (size_t)-1;
        }
        if (TclpPthreadGetAttrs(pthread_self(), &threadAttr) != 0) {
            pthread_attr_destroy(&threadAttr);
            return (size_t)-1;
        }
    }

    if (pthread_attr_getstacksize(&threadAttr, &stackSize) != 0) {
        pthread_attr_destroy(&threadAttr);
        return (size_t)-1;
    }
    pthread_attr_destroy(&threadAttr);
#elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP)
#ifdef __APPLE__
    /*
     * On Darwin, the API below does not return the correct stack size for the
     * main thread (which is not a real pthread), so fall back to getrlimit().
     */
    if (!pthread_main_np())
#endif
    stackSize = pthread_get_stacksize_np(pthread_self());
#else
    /*
     * Cannot determine the real stack size of this thread. The caller might
     * want to try looking at the process accounting limits instead.
     */
#endif
    return stackSize;
}
#endif /* TCL_THREADS */

/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetCurrentThread --
 *
 *	This procedure returns the ID of the currently running thread.
 *
 * Results:
 *	A thread ID.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

Tcl_ThreadId
Tcl_GetCurrentThread(void)
{
#ifdef TCL_THREADS
    return (Tcl_ThreadId) pthread_self();
#else
    return (Tcl_ThreadId) 0;
#endif
}

/*
 *----------------------------------------------------------------------
 *
 * TclpInitLock
 *
 *	This procedure is used to grab a lock that serializes initialization
 *	and finalization of Tcl. On some platforms this may also initialize
 *	the mutex used to serialize creation of more mutexes and thread local
 *	storage keys.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Acquire the initialization mutex.
 *
 *----------------------------------------------------------------------
 */

void
TclpInitLock(void)
{
#ifdef TCL_THREADS
    pthread_mutex_lock(&initLock);
#endif
}
gboolean
mono_threads_platform_is_main_thread (void)
{
    return pthread_main_np () == 1;
}