/*
 * Poll the kqueue descriptor for I/O events, submitting any pending
 * change-list entries in the same kevent(2) call.
 *
 * The timeout is converted from microseconds into the timespec format
 * that kevent(2) expects. On success nevents is set to the number of
 * received events; on failure it is reset to zero.
 */
static void
mm_event_kqueue_poll(struct mm_event_kqueue *event_backend, mm_timeout_t timeout)
{
	ENTER();
	DEBUG("poll: changes: %d, timeout: %lu",
	      event_backend->nevents, (unsigned long) timeout);

	// Convert the microsecond timeout for kevent(2).
	struct timespec ts = {
		.tv_sec = timeout / 1000000,
		.tv_nsec = (timeout % 1000000) * 1000,
	};

	// Publish the log before a possible sleep.
	mm_log_relay();

	// Submit the accumulated changes and wait for incoming events.
	int nr = kevent(event_backend->event_fd,
			event_backend->events, event_backend->nevents,
			event_backend->events, MM_EVENT_KQUEUE_NEVENTS,
			&ts);
	DEBUG("kevent changed: %d, received: %d", event_backend->nevents, nr);

	if (nr < 0) {
		// An interrupted wait is only worth a warning; anything
		// else is reported as an error.
		if (errno == EINTR)
			mm_warning(errno, "kevent");
		else
			mm_error(errno, "kevent");
		event_backend->nevents = 0;
	} else {
		event_backend->nevents = nr;
	}

	LEAVE();
}
/*
 * sbrk - grow the current task's user break region by incr bytes.
 *
 * Returns the previous break end (the start of the newly granted
 * region) on success, or (void *)-1 when the request would push the
 * region past UBRK_LIMIT.
 *
 * NOTE(review): unlike POSIX sbrk() the increment is unsigned here,
 * so the region can never shrink - confirm this is intended.
 */
void *sbrk(size_t incr)
{
	size_t brk_size = current->mm->brk_end - current->mm->brk_start;

	/*
	 * Check the limit without computing brk_size + incr: that sum can
	 * wrap around for a very large incr and slip past the comparison,
	 * letting the break region silently exceed UBRK_LIMIT.
	 */
	if (incr > UBRK_LIMIT - brk_size) {
		mm_error("User brk region limitation reached.\n");
		return (void *)(-1);
	}

	void *new_ptr = (void *)current->mm->brk_end;
	current->mm->brk_end += incr;
	return new_ptr;
}
/*******************************************************************************
 * delMObject
 * Unlink a node from its free-list and hand it back to the caller.
 * ARGS:
 *   mobject  the node to remove
 * RETURNS:
 *   the removed node marked busy, or NULL if mobject was NULL.
 *****************************************************************************/
static inline MObject *delMObject(MObject *mobject)
{
	if (mobject == NULL) {
		mm_error("Trying to remove an empty object");
		return NULL;
	}

	MObject *succ = mobject->next;
	MObject *pred = mobject->prev;

	if (pred != NULL) {
		pred->next = succ;
	} else {
		/* No predecessor: the node headed its size-class bucket,
		 * so the bucket must now point at the successor. */
		heap->objectList[log2(mobject->length)] = succ;
	}
	if (succ != NULL) {
		succ->prev = pred;
	}

	/* Mark the object as allocated before returning it. */
	mobject->busy = M_BUSY;
	return mobject;
}
mm_chunk_enqueue_deferred(struct mm_thread *thread, bool flush) { if (!flush && thread->deferred_chunks_count < MM_CHUNK_FLUSH_THRESHOLD) return; // Capture all the deferred chunks. struct mm_stack chunks = thread->deferred_chunks; mm_stack_prepare(&thread->deferred_chunks); thread->deferred_chunks_count = 0; // Try to submit the chunks to respective reclamation queues. while (!mm_stack_empty(&chunks)) { struct mm_chunk *chunk = mm_chunk_stack_remove(&chunks); struct mm_domain *domain = mm_regular_domain; #if ENABLE_SMP mm_chunk_t tag = mm_chunk_gettag(chunk); struct mm_thread *origin = mm_domain_getthread(domain, tag); #else struct mm_thread *origin = mm_domain_getthread(domain, 0); #endif uint32_t backoff = 0; while (!mm_thread_trypost_1(origin, mm_chunk_free_req, (uintptr_t) chunk)) { if (backoff >= MM_BACKOFF_SMALL) { // If failed to submit the chunk after a number // of attempts then defer it again. mm_chunk_stack_insert(&thread->deferred_chunks, chunk); thread->deferred_chunks_count++; break; } backoff = mm_thread_backoff(backoff); } } // Let know if chunk reclamation consistently has problems. if (thread->deferred_chunks_count > MM_CHUNK_ERROR_THRESHOLD) { if (thread->deferred_chunks_count < MM_CHUNK_FATAL_THRESHOLD) mm_error(0, "Problem with chunk reclamation"); else mm_fatal(0, "Problem with chunk reclamation"); } }
/*
 * Allocate a fresh session descriptor for the given key and link it at
 * the head of its hash chain.
 *
 * Returns the new descriptor, or NULL when shared-memory allocation
 * fails (an E_WARNING is raised in that case). The table is split when
 * the insert lands in an empty slot and the entry count has reached the
 * slot count.
 */
static ps_sd *ps_sd_new(ps_mm *data, const char *key)
{
	int klen = strlen(key);

	/* NOTE(review): allocation adds only klen bytes, yet klen + 1 key
	 * bytes are copied below - assumes ps_sd ends in a one-byte key
	 * array so the terminator fits; confirm against the struct. */
	ps_sd *sd = mm_malloc(data->mm, sizeof(ps_sd) + klen);
	if (!sd) {
		php_error_docref(NULL, E_WARNING, "mm_malloc failed, avail %ld, err %s", mm_available(data->mm), mm_error());
		return NULL;
	}

	php_uint32 hv = ps_sd_hash(key, klen);
	php_uint32 slot = hv & data->hash_max;

	/* Fresh descriptor: no data buffer yet, creation time unset. */
	sd->ctime = 0;
	sd->hv = hv;
	sd->data = NULL;
	sd->alloclen = sd->datalen = 0;
	/* Copy the key including its terminating NUL. */
	memcpy(sd->key, key, klen + 1);

	/* Push onto the head of the slot's chain. */
	sd->next = data->hash[slot];
	data->hash[slot] = sd;
	data->hash_cnt++;

	/* Grow the table only when this slot was previously empty and the
	 * load factor has reached capacity. */
	if (!sd->next && data->hash_cnt >= data->hash_max) {
		hash_split(data);
	}

	ps_mm_debug(("inserting %s(%p) into slot %d\n", key, sd, slot));

	return sd;
}