// Look up (or lazily intern) the FunctionMeta record for the function the
// debugger cursor 'ar' currently points at.
//
// This is slow -- it involves string formatting on every call. That's mostly
// OK because we're careful to keep it out of the timings. It will of course
// still perturb caches and the like, but there's little we can do about that
// in an interpreted language anyway.
static FunctionMeta* FindFunction(lua_State* L, lua_Debug* ar) {
  if (!lua_getinfo(L, "Sn", ar))
    Croak("couldn't get debug info for function call");

  // Build a unique key: "name;namewhat;source;linedefined".
  char key[1024];
  snprintf(key, sizeof key, "%s;%s;%s;%d",
      ar->name ? ar->name : "",
      ar->namewhat ? ar->namewhat : "",
      ar->source,
      ar->linedefined);
  key[(sizeof key) - 1] = 0;

  const uint32_t key_hash = Djb2Hash(key);

  HashRecord* record = HashTableLookup(&s_Profiler.m_Functions, key_hash, key);

  if (nullptr == record) {
    // First sighting of this function -- create and register a record.
    record = LinearAllocate<FunctionMeta>(s_Profiler.m_Allocator);
    record->m_Hash = key_hash;
    record->m_String = StrDup(s_Profiler.m_Allocator, key);
    record->m_Next = nullptr;
    HashTableInsert(&s_Profiler.m_Functions, record);
  }

  return static_cast<FunctionMeta*>(record);
}
void* HeapReallocate(MemAllocHeap *heap, void *ptr, size_t size) { bool thread_safe = 0 != (heap->m_Flags & HeapFlags::kThreadSafe); if (thread_safe) { MutexLock(&heap->m_Lock); } void *new_ptr; #if ENABLED(USE_DLMALLOC) new_ptr = mspace_realloc(heap->m_MemSpace, ptr, size); #else new_ptr = realloc(ptr, size); #endif if (!new_ptr && size > 0) { Croak("out of memory reallocating %d bytes at %p", (int) size, ptr); } if (thread_safe) { MutexUnlock(&heap->m_Lock); } return new_ptr; }
void* HeapAllocate(MemAllocHeap* heap, size_t size) { bool thread_safe = 0 != (heap->m_Flags & HeapFlags::kThreadSafe); if (thread_safe) { MutexLock(&heap->m_Lock); } void* ptr = nullptr; #if ENABLED(USE_DLMALLOC) ptr = mspace_malloc(heap->m_MemSpace, size); #else ptr = malloc(size); #endif if (!ptr) { Croak("out of memory allocating %d bytes", (int) size); } if (thread_safe) { MutexUnlock(&heap->m_Lock); } return ptr; }
// Unmap an mmaped file from RAM. void MmapFileUnmap(MemoryMappedFile* self) { if (self->m_Address) { TimingScope timing_scope(&g_Stats.m_MunmapCalls, &g_Stats.m_MunmapTimeCycles); if (0 != munmap(self->m_Address, self->m_Size)) Croak("munmap(%p, %d) failed: %d", self->m_Address, (int) self->m_Size, errno); close((int) self->m_SysData[0]); } Clear(self); }
void HeapInit(MemAllocHeap* heap, size_t capacity, uint32_t flags) { #if ENABLED(USE_DLMALLOC) heap->m_MemSpace = create_mspace(capacity, 0); if (!heap->m_MemSpace) Croak("couldn't create memspace for new heap"); #else heap->m_MemSpace = nullptr; #endif heap->m_Flags = flags; if (flags & HeapFlags::kThreadSafe) { MutexInit(&heap->m_Lock); } }
// Format segments [start_seg, end_seg] (inclusive) of 'buffer' into 'output'
// as a NUL-terminated string, using the path separator implied by the
// buffer's path type. When starting from segment 0, also emits the leading
// separator for absolute paths (but not for X:\-style Windows device paths,
// whose drive prefix lives in the first segment) and any leading ".."
// components. Croaks if the result would not fit in kMaxPathLength.
void PathFormatPartial(char (&output)[kMaxPathLength], const PathBuffer* buffer, int start_seg, int end_seg) {
  char *cursor = &output[0];
  char pathsep = PathType::kWindows == buffer->m_Type ? '\\' : '/';

  if (start_seg == 0 && PathBuffer::kFlagAbsolute == (buffer->m_Flags & (PathBuffer::kFlagAbsolute|PathBuffer::kFlagWindowsDevicePath))) {
    // One byte for the leading separator; kMaxPathLength >= 2 leaves room
    // for it plus the terminator, but keep the check for symmetry.
    if (2 >= kMaxPathLength)
      Croak("Path too long");
    *cursor++ = pathsep;
  }

  // Emit all leading ".." tokens we've got left. These writes were
  // previously unchecked; bound them so pathological inputs with many
  // leading ".." components can't overflow the output buffer.
  for (int i = 0, count = buffer->m_LeadingDotDots; i < count; ++i) {
    if ((cursor - &output[0]) + 3 >= kMaxPathLength)
      Croak("Path too long");
    *cursor++ = '.';
    *cursor++ = '.';
    *cursor++ = pathsep;
  }

  // Skip past the bytes of the segments before start_seg.
  uint16_t off = 0;
  for (int i = 0; i < start_seg; ++i) {
    off += buffer->SegLength(i);
  }

  // Emit all remaining tokens, separator-delimited.
  for (int i = start_seg; i <= end_seg; ++i) {
    uint16_t len = buffer->SegLength(i);
    // +1 leaves room for the separator/terminator byte.
    if ((cursor - &output[0]) + len + 1 >= kMaxPathLength)
      Croak("Path too long");
    if (i > start_seg)
      *cursor++ = pathsep;
    memcpy(cursor, buffer->m_Data + off, len);
    cursor += len;
    off += len;
  }

  *cursor = 0;
}
void ScanCacheSetCache(ScanCache* self, const ScanData* frozen_data) { self->m_FrozenData = frozen_data; if (frozen_data) { self->m_FrozenAccess = HeapAllocateArrayZeroed<uint8_t>(self->m_Heap, frozen_data->m_EntryCount); Log(kDebug, "Scan cache initialized from frozen data - %u entries", frozen_data->m_EntryCount); #if ENABLED(CHECKED_BUILD) // Paranoia - make sure the cache is sorted. for (int i = 1, count = frozen_data->m_EntryCount; i < count; ++i) { if (frozen_data->m_Keys[i] < frozen_data->m_Keys[i - 1]) Croak("Header scanning cache is not sorted"); } #endif } }
// Tokenize 'scratch' into path segments split on '/' or '\\'. Empty
// segments (from doubled separators) are skipped. Each segment records its
// offset/length within the input plus whether it is ".." or "." (the latter
// pre-marked for dropping). Returns the number of segments written; croaks
// if the input has more than kMaxPathSegments segments.
static int PathGetSegments(const char* scratch, PathSeg segments[kMaxPathSegments]) {
  const char* const base = scratch;
  const char* seg_start = scratch;
  int count = 0;

  for (;;) {
    const char ch = *scratch;
    const bool at_sep = ('\\' == ch) || ('/' == ch);

    if (at_sep || '\0' == ch) {
      const int len = (int) (scratch - seg_start);

      if (len > 0) {
        if (kMaxPathSegments == count)
          Croak("too many segments in path; limit is %d", kMaxPathSegments);

        const int is_dotdot = (2 == len) && (0 == memcmp("..", seg_start, 2));
        const int is_dot = (1 == len) && ('.' == seg_start[0]);

        segments[count].offset = (uint16_t) (seg_start - base);
        segments[count].len = (uint16_t) len;
        segments[count].dotdot = (uint8_t) is_dotdot;
        segments[count].drop = (uint8_t) is_dot;
        ++count;
      }

      seg_start = scratch + 1;

      if ('\0' == ch)
        break;
    }

    ++scratch;
  }

  return count;
}
// Parse 'path' into a normalized PathBuffer: detects absolute/device-path
// prefixes per the path type, tokenizes into segments, resolves "." and ".."
// components (unresolvable leading ".." are counted in m_LeadingDotDots),
// and packs the surviving segment bytes into m_Data with end offsets in
// m_SegEnds. Croaks on a bad path type or a path that won't fit.
void PathInit(PathBuffer* buffer, const char* path, PathType::Enum type) {
  // Initialize
  buffer->m_Type = type;
  buffer->m_Flags = 0;

  // Check to see if the path is absolute
  switch (type) {
    case PathType::kUnix:
      if ('/' == path[0]) {
        buffer->m_Flags |= PathBuffer::kFlagAbsolute;
        path++;
      }
      break;

    case PathType::kWindows:
      // Check for absolute path w/o device name
      if ('\\' == path[0] || '/' == path[0]) {
        buffer->m_Flags |= PathBuffer::kFlagAbsolute;
        path++;
      }
      // Check for X:\ style path. Cast to unsigned char: isalpha() on a
      // negative char value (signed char with a non-ASCII byte) is UB.
      else if (isalpha((unsigned char) path[0]) && ':' == path[1] && ('\\' == path[2] || '/' == path[2])) {
        buffer->m_Flags |= PathBuffer::kFlagAbsolute | PathBuffer::kFlagWindowsDevicePath;
      }
      // FIXME: network paths
      break;

    default:
      Croak("bad path type");
      break;
  }

  // Initialize segment data
  PathSeg segments[kMaxPathSegments];
  int raw_seg_count = PathGetSegments(path, segments);

  uint16_t dotdot_drops = 0;

  // Walk backwards, cancelling each non-".." segment against a ".." that
  // follows it. Any ".." left uncancelled at the front is kept as a count.
  for (int i = raw_seg_count - 1; i >= 0; --i) {
    if (segments[i].drop)
      continue;

    if (segments[i].dotdot) {
      ++dotdot_drops;
      segments[i].drop = 1;
    }
    else if (dotdot_drops > 0) {
      --dotdot_drops;
      segments[i].drop = 1;
    }
  }

  buffer->m_LeadingDotDots = dotdot_drops;

  // Copy retained segments to output array
  uint16_t output_seg_count = 0;
  uint16_t output_pos = 0;

  for (int i = 0; i < raw_seg_count; ++i) {
    if (segments[i].drop)
      continue;

    // Check the full segment fits, not just its first byte: the old check
    // (output_pos >= kMaxPathLength) let memcpy run up to len-1 bytes past
    // the end of m_Data.
    if (output_pos + segments[i].len > kMaxPathLength)
      Croak("Path too long: %s", path);

    memcpy(buffer->m_Data + output_pos, path + segments[i].offset, segments[i].len);
    output_pos += segments[i].len;
    buffer->m_SegEnds[output_seg_count] = output_pos;
    ++output_seg_count;
  }

  buffer->m_SegCount = output_seg_count;
}
// Drive 'node' through the build-state machine until it either completes or
// must yield (blocked on dependencies, or parked as an expensive node).
// Called with the node active, queued-off, and not yet completed.
// NOTE(review): appears to assume the caller holds 'queue_lock' around the
// queue-counter updates and CondBroadcast calls below -- confirm at call sites.
static void AdvanceNode(BuildQueue* queue, ThreadState* thread_state, NodeState* node, Mutex* queue_lock) {
  Log(kSpam, "T=%d, [%d] Advancing %s\n", thread_state->m_ThreadIndex, node->m_Progress, node->m_MmapData->m_Annotation.Get());

  CHECK(!NodeStateIsCompleted(node));
  CHECK(NodeStateIsActive(node));
  CHECK(!NodeStateIsQueued(node));

  // Loop so a node can fall through several states in one call; we only
  // return when the node parks, blocks, or completes.
  for (;;) {
    switch (node->m_Progress) {
      case BuildProgress::kInitial:
        node->m_Progress = SetupDependencies(queue, node);

        if (BuildProgress::kBlocked == node->m_Progress) {
          // Set ourselves as inactive until our dependencies are ready.
          NodeStateFlagInactive(node);
          return;
        }
        else
          break;

      case BuildProgress::kBlocked:
        // We only get unblocked once every dependency has finished.
        CHECK(AllDependenciesReady(queue, node));
        node->m_Progress = BuildProgress::kUnblocked;
        break;

      case BuildProgress::kUnblocked:
        // Decide whether inputs changed (run the action) or we're up to date.
        node->m_Progress = CheckInputSignature(queue, thread_state, node, queue_lock);
        break;

      case BuildProgress::kRunAction:
        node->m_Progress = RunAction(queue, thread_state, node, queue_lock);

        // If we couldn't make progress, we're a parked expensive node.
        // Another expensive job will put us back on the queue later when it
        // has finished.
        if (BuildProgress::kRunAction == node->m_Progress)
          return;

        // Otherwise, we just ran our action. If we were an expensive node,
        // make sure to let other expensive nodes on to the cores now.
        if (node->m_MmapData->m_Flags & NodeData::kFlagExpensive) {
          --queue->m_ExpensiveRunning;
          CHECK(queue->m_ExpensiveRunning >= 0);

          // We were an expensive job. We can unpark another expensive job if
          // anything is waiting.
          UnparkExpensiveNode(queue);
        }
        break;

      case BuildProgress::kSucceeded:
      case BuildProgress::kUpToDate:
        node->m_BuildResult = 0;
        node->m_Progress = BuildProgress::kCompleted;
        break;

      case BuildProgress::kFailed:
        // Record the failure and wake workers so they can observe it.
        queue->m_FailedNodeCount++;
        CondBroadcast(&queue->m_WorkAvailable);
        node->m_BuildResult = 1;
        node->m_Progress = BuildProgress::kCompleted;
        break;

      case BuildProgress::kCompleted:
        // Terminal state: release anyone blocked on this node and wake the
        // worker pool.
        queue->m_PendingNodeCount--;
        UnblockWaiters(queue, node);
        CondBroadcast(&queue->m_WorkAvailable);
        return;

      default:
        Croak("invalid node state progress");
        break;
    }
  }
}