/* returns -1 on error. */
int CFrom::ToStr(char** dest)
{
    char* url;
    char* buf;
    int i;
    size_t len;

    *dest = NULL;
    if (this->url == NULL)
        return -1;
    i = this->url->ToStr(&url);
    if (i != 0)
        return -1;

    if (this->displayname == NULL)
        len = strlen(url) + 5;
    else
        len = strlen(url) + strlen(this->displayname) + 5;
    buf = (char *) mm_malloc(len);
    if (buf == NULL) {
        mm_free(url);
        return -1;
    }

    if (this->displayname != NULL)
        sprintf(buf, "%s <%s>", this->displayname, url);
    else
        /* from rfc2543bis-04 (authentication-related issue):
           "The To and From header fields always include the < and >
           delimiters even if the display-name is empty." */
        sprintf(buf, "<%s>", url);
    mm_free(url);

    {
        int pos = 0;
        CUrlParam* u_param;
        size_t plen;
        char* tmp;

        while (!this->gen_params.IsListEof(pos)) {
            u_param = (CUrlParam *) this->gen_params.GetAt(pos);
            if (u_param->gvalue == NULL)
                plen = strlen(u_param->gname) + 2;
            else
                plen = strlen(u_param->gname) + strlen(u_param->gvalue) + 3;
            len = len + plen;
            /* check the realloc result so a failure does not crash or leak */
            tmp = (char *) mm_realloc(buf, len);
            if (tmp == NULL) {
                mm_free(buf);
                return -1;
            }
            buf = tmp;
            tmp = buf + strlen(buf);
            if (u_param->gvalue == NULL)
                sprintf(tmp, ";%s", u_param->gname);
            else
                sprintf(tmp, ";%s=%s", u_param->gname, u_param->gvalue);
            pos++;
        }
    }
    *dest = buf;
    return 0;
}
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
    struct epollop *epollop = base->evbase;
    struct epoll_event *events = epollop->events;
    int i, res;
    long timeout = -1;

    if (tv != NULL) {
        timeout = evutil_tv_to_msec(tv);
        if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
            /* Linux kernels can wait forever if the timeout is
             * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
            timeout = MAX_EPOLL_TIMEOUT_MSEC;
        }
    }

    epoll_apply_changes(base);
    event_changelist_remove_all(&base->changelist, base);

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("epoll_wait");
            return (-1);
        }
        return (0);
    }

    event_debug(("%s: epoll_wait reports %d", __func__, res));
    EVUTIL_ASSERT(res <= epollop->nevents);

    for (i = 0; i < res; i++) {
        int what = events[i].events;
        short ev = 0;

        if (what & (EPOLLHUP|EPOLLERR)) {
            ev = EV_READ | EV_WRITE;
        } else {
            if (what & EPOLLIN)
                ev |= EV_READ;
            if (what & EPOLLOUT)
                ev |= EV_WRITE;
        }

        if (!ev)
            continue;

        evmap_io_active(base, events[i].data.fd, ev | EV_ET);
    }

    if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
        /* We used all of the event space this time.  We should
           be ready for more events next time. */
        int new_nevents = epollop->nevents * 2;
        struct epoll_event *new_events;

        new_events = mm_realloc(epollop->events,
            new_nevents * sizeof(struct epoll_event));
        if (new_events) {
            epollop->events = new_events;
            epollop->nevents = new_nevents;
        }
    }

    return (0);
}
/******************************************************************************
 * Read from the current position to the end of the current line.
 * The current position is assumed to be the start of a new sequence.
 *
 * Returns TRUE if it was able to read the sequence text, FALSE if
 * EOF reached before the terminal newline was found. Dies if other errors
 * are encountered.
 *****************************************************************************/
BOOLEAN_T read_sequence_from_prior_reader_from_psp(
  PSP_DATA_BLOCK_READER_T *psp_reader
) {
  int result = FALSE;

  // Initial allocation of sequence buffer
  const size_t initial_buffer_len = 100;
  if (psp_reader->sequence_header == NULL) {
    psp_reader->sequence_header = mm_malloc(sizeof(char) * initial_buffer_len);
    psp_reader->sequence_buffer_len = initial_buffer_len;
  }

  // Look for EOL
  int c = 0;
  int seq_index = 0;
  while ((c = fgetc(psp_reader->psp_file)) != EOF) {
    if (seq_index >= psp_reader->sequence_buffer_len) {
      // Need to grow buffer
      psp_reader->sequence_header = mm_realloc(
        psp_reader->sequence_header,
        2 * psp_reader->sequence_buffer_len
      );
      psp_reader->sequence_buffer_len = 2 * psp_reader->sequence_buffer_len;
    }
    if (c == '\n') {
      psp_reader->sequence_header[seq_index] = '\0';
      psp_reader->sequence_header_len = seq_index + 1;
      psp_reader->at_start_of_line = TRUE;
      result = TRUE;
      break;
    }
    else {
      psp_reader->sequence_header[seq_index] = c;
      ++seq_index;
    }
  }

  // At this point c is EOL or EOF
  if (c == EOF) {
    if (ferror(psp_reader->psp_file)) {
      // EOF could actually indicate an error; report errno, since
      // ferror() only returns a nonzero flag, not an error code.
      die(
        "Error reading file:%s.\nError message: %s\n",
        psp_reader->filename,
        strerror(errno)
      );
    }
    else if (feof(psp_reader->psp_file)) {
      // Reached EOF before reaching EOL for the sequence.
      psp_reader->sequence_header[0] = '\0';
      psp_reader->sequence_header_len = 0;
    }
  }

  return result;
}
static int
poll_dispatch(struct event_base *base, struct timeval *tv)
{
    int res, i, j, nfds;
    long msec = -1;
    struct pollop *pop = base->evbase;
    struct pollfd *event_set;

    poll_check_ok(pop);

    nfds = pop->nfds;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
    if (base->th_base_lock) {
        /* If we're using this backend in a multithreaded setting,
         * then we need to work on a copy of event_set, so that we can
         * let other threads modify the main event_set while we're
         * polling. If we're not multithreaded, then we'll skip the
         * copy step here to save memory and time. */
        if (pop->realloc_copy) {
            struct pollfd *tmp = mm_realloc(pop->event_set_copy,
                pop->event_count * sizeof(struct pollfd));
            if (tmp == NULL) {
                event_warn("realloc");
                return -1;
            }
            pop->event_set_copy = tmp;
            pop->realloc_copy = 0;
        }
        memcpy(pop->event_set_copy, pop->event_set,
            sizeof(struct pollfd)*nfds);
        event_set = pop->event_set_copy;
    } else {
        event_set = pop->event_set;
    }
#else
    event_set = pop->event_set;
#endif

    if (tv != NULL) {
        msec = evutil_tv_to_msec_(tv);
        if (msec < 0 || msec > INT_MAX)
            msec = INT_MAX;
    }

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = poll(event_set, nfds, msec);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("poll");
            return (-1);
        }
        return (0);
    }

    event_debug(("%s: poll reports %d", __func__, res));

    if (res == 0 || nfds == 0)
        return (0);

    i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
    for (j = 0; j < nfds; j++) {
        int what;
        if (++i == nfds)
            i = 0;
        what = event_set[i].revents;
        if (!what)
            continue;

        res = 0;

        /* If the file gets closed notify */
        if (what & (POLLHUP|POLLERR|POLLNVAL))
            what |= POLLIN|POLLOUT;
        if (what & POLLIN)
            res |= EV_READ;
        if (what & POLLOUT)
            res |= EV_WRITE;
        if (res == 0)
            continue;

        evmap_io_active_(base, event_set[i].fd, res);
    }

    return (0);
}
static int
select_dispatch(struct event_base *base, struct timeval *tv)
{
    int res=0, i, j, nfds;
    struct selectop *sop = base->evbase;

    check_selectop(sop);
    if (sop->resize_out_sets) {
        fd_set *readset_out=NULL, *writeset_out=NULL;
        size_t sz = sop->event_fdsz;
        if (!(readset_out = mm_realloc(sop->event_readset_out, sz)))
            return (-1);
        sop->event_readset_out = readset_out;
        if (!(writeset_out = mm_realloc(sop->event_writeset_out, sz))) {
            /* We don't free readset_out here, since it was
             * already successfully reallocated. The next time
             * we call select_dispatch, the realloc will be a
             * no-op. */
            return (-1);
        }
        sop->event_writeset_out = writeset_out;
        sop->resize_out_sets = 0;
    }

    memcpy(sop->event_readset_out, sop->event_readset_in,
        sop->event_fdsz);
    memcpy(sop->event_writeset_out, sop->event_writeset_in,
        sop->event_fdsz);

    nfds = sop->event_fds+1;

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = select(nfds, sop->event_readset_out,
        sop->event_writeset_out, NULL, tv);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    check_selectop(sop);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("select");
            return (-1);
        }
        return (0);
    }

    event_debug(("%s: select reports %d", __func__, res));

    check_selectop(sop);
    i = random() % nfds;
    for (j = 0; j < nfds; ++j) {
        if (++i >= nfds)
            i = 0;
        res = 0;
        if (FD_ISSET(i, sop->event_readset_out))
            res |= EV_READ;
        if (FD_ISSET(i, sop->event_writeset_out))
            res |= EV_WRITE;

        if (res == 0)
            continue;

        evmap_io_active(base, i, res);
    }
    check_selectop(sop);

    return (0);
}
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
    struct kqop *kqop = base->evbase;
    struct kevent *events = kqop->events;
    struct kevent *changes;
    struct timespec ts, *ts_p = NULL;
    int i, n_changes, res;

    if (tv != NULL) {
        TIMEVAL_TO_TIMESPEC(tv, &ts);
        ts_p = &ts;
    }

    /* Build "changes" from "base->changes" */
    EVUTIL_ASSERT(kqop->changes);
    n_changes = kq_build_changes_list(&base->changelist, kqop);
    if (n_changes < 0)
        return -1;
    event_changelist_remove_all(&base->changelist, base);

    /* steal the changes array in case some broken code tries to call
     * dispatch twice at once. */
    changes = kqop->changes;
    kqop->changes = NULL;

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = kevent(kqop->kq, changes, n_changes,
        events, kqop->events_size, ts_p);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    EVUTIL_ASSERT(kqop->changes == NULL);
    kqop->changes = changes;

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("kevent");
            return (-1);
        }
        return (0);
    }

    event_debug(("%s: kevent reports %d", __func__, res));

    for (i = 0; i < res; i++) {
        int which = 0;

        if (events[i].flags & EV_ERROR) {
            /*
             * Error messages that can happen, when a delete fails.
             *   EBADF happens when the file descriptor has been
             *   closed,
             *   ENOENT when the file descriptor was closed and
             *   then reopened.
             *   EINVAL for some reasons not understood; EINVAL
             *   should not be returned ever; but FreeBSD does :-\
             * An error is also indicated when a callback deletes
             * an event we are still processing.  In that case
             * the data field is set to ENOENT.
             */
            if (events[i].data == EBADF ||
                events[i].data == EINVAL ||
                events[i].data == ENOENT)
                continue;
            errno = events[i].data;
            return (-1);
        }

        if (events[i].filter == EVFILT_READ) {
            which |= EV_READ;
        } else if (events[i].filter == EVFILT_WRITE) {
            which |= EV_WRITE;
        } else if (events[i].filter == EVFILT_SIGNAL) {
            which |= EV_SIGNAL;
        }

        if (!which)
            continue;

        if (events[i].filter == EVFILT_SIGNAL) {
            evmap_signal_active(base, events[i].ident, 1);
        } else {
            evmap_io_active(base, events[i].ident, which | EV_ET);
        }
    }

    if (res == kqop->events_size) {
        struct kevent *newresult;
        int size = kqop->events_size;
        /* We used all the events space that we have. Maybe we should
           make it bigger. */
        size *= 2;
        newresult = mm_realloc(kqop->events,
            size * sizeof(struct kevent));
        if (newresult) {
            kqop->events = newresult;
            kqop->events_size = size;
        }
    }

    return (0);
}
/* returns -1 on error. */
int CVia::ToStr(char** dest)
{
    char* buf;
    size_t len;
    size_t plen;
    char* tmp;

    *dest = NULL;
    if ((this == NULL) || (this->m_pcHost == NULL) ||
        (this->m_pcVersion == NULL) || (this->m_pcProtocol == NULL))
        return -1;

    len = strlen(this->m_pcVersion) + 1 + strlen(this->m_pcProtocol) + 1
        + 3 + 2; /* sip/xxx/xxx */
    len = len + strlen(this->m_pcHost) + 3 + 1;
    if (this->m_pcPort != NULL)
        len = len + strlen(this->m_pcPort) + 2;

    buf = (char *) mm_malloc(len);
    if (buf == NULL)
        return -1;

    if (strchr(this->m_pcHost, ':') != NULL) {
        /* host contains a ':' (IPv6 literal): wrap it in brackets */
        if (this->m_pcPort == NULL)
            sprintf(buf, "SIP/%s/%s [%s]",
                this->m_pcVersion, this->m_pcProtocol, this->m_pcHost);
        else
            sprintf(buf, "SIP/%s/%s [%s]:%s",
                this->m_pcVersion, this->m_pcProtocol,
                this->m_pcHost, this->m_pcPort);
    } else {
        if (this->m_pcPort == NULL)
            sprintf(buf, "SIP/%s/%s %s",
                this->m_pcVersion, this->m_pcProtocol, this->m_pcHost);
        else
            sprintf(buf, "SIP/%s/%s %s:%s",
                this->m_pcVersion, this->m_pcProtocol,
                this->m_pcHost, this->m_pcPort);
    }

    {
        int pos = 0;
        CUrlParam* u_param;
        while (!this->m_listVia_params.IsListEof(pos)) {
            u_param = (CUrlParam *) this->m_listVia_params.GetAt(pos);
            if (u_param->gvalue == NULL)
                plen = strlen(u_param->gname) + 2;
            else
                plen = strlen(u_param->gname) + strlen(u_param->gvalue) + 3;
            len = len + plen;
            /* check the realloc result so a failure does not crash or leak */
            tmp = (char *) mm_realloc(buf, len);
            if (tmp == NULL) {
                mm_free(buf);
                return -1;
            }
            buf = tmp;
            tmp = buf + strlen(buf);
            if (u_param->gvalue == NULL)
                sprintf(tmp, ";%s", u_param->gname);
            else
                sprintf(tmp, ";%s=%s", u_param->gname, u_param->gvalue);
            pos++;
        }
    }

    if (this->m_pcComment != NULL) {
        len = len + strlen(this->m_pcComment) + 4;
        tmp = (char *) mm_realloc(buf, len);
        if (tmp == NULL) {
            mm_free(buf);
            return -1;
        }
        buf = tmp;
        tmp = buf + strlen(buf);
        sprintf(tmp, " (%s)", this->m_pcComment);
    }

    *dest = buf;
    return 0;
}
void * NVRealloc(NVRDescr * addr, void * ptr, int size)
{
    printf("oldPtr is %p\n", ptr);
    void * newMemPtr = mm_realloc(ptr, (size_t)size);
    printf("newMemPtr is %p\n", newMemPtr);
    return newMemPtr;
}
/* Helper: set the signal handler for evsignal to handler in base, so that
 * we can restore the original handler when we clear the current one. */
int
evsig_set_handler_(struct event_base *base,
    int evsignal, void (__cdecl *handler)(int))
{
#ifdef EVENT__HAVE_SIGACTION
    struct sigaction sa;
#else
    ev_sighandler_t sh;
#endif
    struct evsig_info *sig = &base->sig;
    void *p;

    /*
     * resize saved signal handler array up to the highest signal number.
     * a dynamic array is used to keep footprint on the low side.
     */
    if (evsignal >= sig->sh_old_max) {
        int new_max = evsignal + 1;
        event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
                __func__, evsignal, sig->sh_old_max));
        p = mm_realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
        if (p == NULL) {
            event_warn("realloc");
            return (-1);
        }

        memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
            0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));

        sig->sh_old_max = new_max;
        sig->sh_old = p;
    }

    /* allocate space for previous handler out of dynamic array */
    sig->sh_old[evsignal] = mm_malloc(sizeof *sig->sh_old[evsignal]);
    if (sig->sh_old[evsignal] == NULL) {
        event_warn("malloc");
        return (-1);
    }

    /* save previous handler and setup new handler */
#ifdef EVENT__HAVE_SIGACTION
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = handler;
    sa.sa_flags |= SA_RESTART;
    sigfillset(&sa.sa_mask);

    if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
        event_warn("sigaction");
        mm_free(sig->sh_old[evsignal]);
        sig->sh_old[evsignal] = NULL;
        return (-1);
    }
#else
    if ((sh = signal(evsignal, handler)) == SIG_ERR) {
        event_warn("signal");
        mm_free(sig->sh_old[evsignal]);
        sig->sh_old[evsignal] = NULL;
        return (-1);
    }
    *sig->sh_old[evsignal] = sh;
#endif

    return (0);
}
/******************************************************************************
 * This function reads the entire sequence header at the start of a new
 * sequence. The current position is assumed to be the start of a new
 * sequence. Read from the current position to the end of the current line.
 *
 * Returns TRUE if it was able to read the sequence text, FALSE if
 * EOF reached before the terminal newline was found. Dies if other errors
 * are encountered.
 *****************************************************************************/
BOOLEAN_T read_seq_header_from_seq_reader_from_fasta(
  SEQ_READER_FROM_FASTA_T *fasta_reader
) {
  int result = FALSE;

  // Initial allocation of sequence buffer
  const size_t initial_buffer_len = 100;
  if (fasta_reader->sequence_header == NULL) {
    fasta_reader->sequence_header = mm_malloc(sizeof(char) * initial_buffer_len);
    fasta_reader->sequence_buffer_len = initial_buffer_len;
  }

  // Look for EOL
  int c = 0;
  int seq_index = 0;
  while ((c = fgetc(fasta_reader->fasta_file)) != EOF) {
    if (seq_index >= fasta_reader->sequence_buffer_len) {
      // Need to grow buffer
      fasta_reader->sequence_header = mm_realloc(
        fasta_reader->sequence_header,
        2 * fasta_reader->sequence_buffer_len
      );
      fasta_reader->sequence_buffer_len = 2 * fasta_reader->sequence_buffer_len;
    }
    if (c == '\n') {
      // Found EOL
      fasta_reader->sequence_header[seq_index] = '\0';
      fasta_reader->sequence_header_len = seq_index + 1;
      fasta_reader->at_start_of_line = TRUE;
      result = TRUE;
      break;
    }
    else {
      // Keep looking for EOL
      fasta_reader->sequence_header[seq_index] = c;
      ++seq_index;
    }
  }

  // At this point c is EOL or EOF
  if (c == EOF) {
    if (ferror(fasta_reader->fasta_file)) {
      // EOF could actually indicate an error; report errno, since
      // ferror() only returns a nonzero flag, not an error code.
      die(
        "Error reading file:%s.\nError message: %s\n",
        fasta_reader->filename,
        strerror(errno)
      );
    }
    else if (feof(fasta_reader->fasta_file)) {
      // Reached EOF before reaching EOL for the sequence.
      fasta_reader->sequence_header[0] = '\0';
      fasta_reader->sequence_header_len = 0;
    }
  }

  return result;
}
FAR void *realloc(FAR void *oldmem, size_t size)
{
  return mm_realloc(&g_mmheap, oldmem, size);
}
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
    struct epollop *epollop = base->evbase;
    struct epoll_event *events = epollop->events;
    int i, res;
    long timeout = -1;

#ifdef USING_TIMERFD
    if (epollop->timerfd >= 0) {
        struct itimerspec is;
        is.it_interval.tv_sec = 0;
        is.it_interval.tv_nsec = 0;
        if (tv == NULL) {
            /* No timeout; disarm the timer. */
            is.it_value.tv_sec = 0;
            is.it_value.tv_nsec = 0;
        } else {
            if (tv->tv_sec == 0 && tv->tv_usec == 0) {
                /* we need to exit immediately; timerfd can't
                 * do that. */
                timeout = 0;
            }
            is.it_value.tv_sec = tv->tv_sec;
            is.it_value.tv_nsec = tv->tv_usec * 1000;
        }
        /* TODO: we could avoid unnecessary syscalls here by only
           calling timerfd_settime when the top timeout changes, or
           when we're called with a different timeval. */
        if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
            event_warn("timerfd_settime");
        }
    } else
#endif
    if (tv != NULL) {
        timeout = evutil_tv_to_msec_(tv);
        if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
            /* Linux kernels can wait forever if the timeout is
             * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
            timeout = MAX_EPOLL_TIMEOUT_MSEC;
        }
    }

    epoll_apply_changes(base);
    event_changelist_remove_all_(&base->changelist, base);

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("epoll_wait");
            return (-1);
        }
        return (0);
    }

    event_debug(("%s: epoll_wait reports %d", __func__, res));
    EVUTIL_ASSERT(res <= epollop->nevents);

    for (i = 0; i < res; i++) {
        int what = events[i].events;
        short ev = 0;
#ifdef USING_TIMERFD
        if (events[i].data.fd == epollop->timerfd)
            continue;
#endif
        if (what & (EPOLLHUP|EPOLLERR)) {
            ev = EV_READ | EV_WRITE;
        } else {
            if (what & EPOLLIN)
                ev |= EV_READ;
            if (what & EPOLLOUT)
                ev |= EV_WRITE;
            if (what & EPOLLRDHUP)
                ev |= EV_CLOSED;
        }

        if (!ev)
            continue;

        evmap_io_active_(base, events[i].data.fd, ev | EV_ET);
    }

    if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
        /* We used all of the event space this time.  We should
           be ready for more events next time. */
        int new_nevents = epollop->nevents * 2;
        struct epoll_event *new_events;

        new_events = mm_realloc(epollop->events,
            new_nevents * sizeof(struct epoll_event));
        if (new_events) {
            epollop->events = new_events;
            epollop->nevents = new_nevents;
        }
    }

    return (0);
}
int main(int argc, char **argv)
{
    int *data, i;
    double *mdata;

    printf("Allocating double\n");
    mdata = (double*) mm_malloc(sizeof(double));
    *mdata = 12345;
    printf("double = %f\n", *mdata);
    printf("double freed\n");
    mm_free(mdata);

    printf("\nAllocating integer array\n");
    data = (int*) mm_malloc(2*sizeof(int));
    data[0] = 1024;
    data[1] = 2048;
    printf("Data = {%d,%d}\n", data[0], data[1]);
    printf("Block reuse %s\n", ((void*)data==(void*)mdata)?"passed":"failed...");

    printf("Extending integer array\n");
    data = (int*) mm_realloc(data, 1028*sizeof(int)); //past next page
    for (i = 0; i < 1028; i++)
        data[i] = i;
    printf("Freeing integer array\n");
    mm_free(data);

    printf("\nAllocating nothing\n");
    data = (int*) mm_malloc(0);
    printf("Zero size mm_malloc %s\n", (data==NULL)?"passed":"failed...");
    mm_free(data);
    data = (int*) mm_malloc(-1024);
    printf("Negative size mm_malloc %s\n", (data==NULL)?"passed":"failed...");
    mm_free(data);

    printf("\nTesting block splitting\n");
    void* limit = sbrk(0);
    data = (int*) mm_malloc(100*sizeof(int));
    data[64] = 123456;
    mdata = (double*) mm_malloc(100*sizeof(double));
    mdata[64] = 12346.789;
    printf("Block splitting %s\n", ((void*)mdata < limit)?"passed":"failed...");

    printf("\nTesting block merging\n");
    mm_free(data);
    mm_free(mdata);
    data = (int*) mm_malloc(1028*sizeof(int));
    printf("Block merging %s\n", ((void*)data < limit)?"passed":"failed...");

    printf("\nTesting realloc block shrinking\n");
    mdata = (double*) mm_malloc(sizeof(double));
    double* old = mdata;
    data = (int*) mm_realloc(data, 2*sizeof(int)); //shrink array
    mdata = (double*) mm_realloc(mdata, 30*sizeof(double));
    printf("Block shrinking %s\n", (old > mdata)?"passed":"failed...");
    mm_free(data);
    mm_free(mdata);

    printf("\nFreeing the memory\n");
    mm_free((void*)12345);
    printf("\n");
    mm_free(NULL);
    printf("\n\n");
    printf("malloc passed!\n");
    return 0;
}
static inline void resize(struct ymd_mach *vm, struct dyay *arr)
{
    int old = arr->max;
    arr->max = arr->count * 3 / 2 + MAX_ADD;
    arr->elem = mm_realloc(vm, arr->elem, old, arr->max, sizeof(*arr->elem));
}
/*
 * eval_mm_valid - Check the mm malloc package for correctness
 */
static int eval_mm_valid(trace_t *trace, int tracenum, range_t **ranges)
{
    int i, j;
    int index;
    int size;
    int oldsize;
    char *newp;
    char *oldp;
    char *p;

    /* Reset the heap and free any records in the range list */
    mem_reset_brk();
    clear_ranges(ranges);

    /* Call the mm package's init function */
    if (mm_init() < 0) {
        malloc_error(tracenum, 0, "mm_init failed.");
        return 0;
    }

    /* Interpret each operation in the trace in order */
    for (i = 0; i < trace->num_ops; i++) {
        index = trace->ops[i].index;
        size = trace->ops[i].size;

        switch (trace->ops[i].type) {

        case ALLOC: /* mm_malloc */

            /* Call the student's malloc */
            if ((p = mm_malloc(size)) == NULL) {
                malloc_error(tracenum, i, "mm_malloc failed.");
                return 0;
            }

            /*
             * Test the range of the new block for correctness and add it
             * to the range list if OK. The block must be aligned properly,
             * and must not overlap any currently allocated block.
             */
            if (add_range(ranges, p, size, tracenum, i) == 0)
                return 0;

            /* ADDED: cgw
             * fill range with low byte of index. This will be used later
             * if we realloc the block and wish to make sure that the old
             * data was copied to the new block */
            memset(p, index & 0xFF, size);

            /* Remember region */
            trace->blocks[index] = p;
            trace->block_sizes[index] = size;
            break;

        case REALLOC: /* mm_realloc */

            /* Call the student's realloc */
            oldp = trace->blocks[index];
            if ((newp = mm_realloc(oldp, size)) == NULL) {
                malloc_error(tracenum, i, "mm_realloc failed.");
                return 0;
            }

            /* Remove the old region from the range list */
            remove_range(ranges, oldp);

            /* Check new block for correctness and add it to range list */
            if (add_range(ranges, newp, size, tracenum, i) == 0)
                return 0;

            /* ADDED: cgw
             * Make sure that the new block contains the data from the old
             * block and then fill in the new block with the low order byte
             * of the new index */
            oldsize = trace->block_sizes[index];
            if (size < oldsize)
                oldsize = size;
            for (j = 0; j < oldsize; j++) {
                if (newp[j] != (index & 0xFF)) {
                    malloc_error(tracenum, i, "mm_realloc did not preserve the "
                        "data from old block");
                    return 0;
                }
            }
            memset(newp, index & 0xFF, size);

            /* Remember region */
            trace->blocks[index] = newp;
            trace->block_sizes[index] = size;
            break;

        case FREE: /* mm_free */

            /* Remove region from list and call student's free function */
            p = trace->blocks[index];
            remove_range(ranges, p);
            mm_free(p);
            break;

        default:
            app_error("Nonexistent request type in eval_mm_valid");
        }
    }

    /* As far as we know, this is a valid malloc package */
    return 1;
}
static int
select_dispatch(struct event_base *base, struct timeval *tv)
{
    int res=0, i, j, nfds;
    struct selectop *sop = base->evbase;

    check_selectop(sop);
    if (sop->resize_out_sets) {
        fd_set *readset_out=NULL, *writeset_out=NULL;
        size_t sz = sop->event_fdsz;
        if (!(readset_out = mm_realloc(sop->event_readset_out, sz)))
            return (-1);
        /* Store the new pointer right away: the old block may already have
         * been freed by the realloc, so it must not be kept or freed again.
         * If the next realloc fails, the retry on the following call is a
         * no-op for this set. */
        sop->event_readset_out = readset_out;
        if (!(writeset_out = mm_realloc(sop->event_writeset_out, sz)))
            return (-1);
        sop->event_writeset_out = writeset_out;
        sop->resize_out_sets = 0;
    }

    memcpy(sop->event_readset_out, sop->event_readset_in,
        sop->event_fdsz);
    memcpy(sop->event_writeset_out, sop->event_writeset_in,
        sop->event_fdsz);

    nfds = sop->event_fds+1;

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = select(nfds, sop->event_readset_out,
        sop->event_writeset_out, NULL, tv);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    check_selectop(sop);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("select");
            return (-1);
        }
        evsig_process(base);
        return (0);
    } else if (base->sig.evsig_caught) {
        evsig_process(base);
    }

    event_debug(("%s: select reports %d", __func__, res));

    check_selectop(sop);
    i = random() % (nfds+1);
    for (j = 0; j <= nfds; ++j) {
        if (++i >= nfds+1)
            i = 0;
        res = 0;
        if (FD_ISSET(i, sop->event_readset_out))
            res |= EV_READ;
        if (FD_ISSET(i, sop->event_writeset_out))
            res |= EV_WRITE;

        if (res == 0)
            continue;

        evmap_io_active(base, i, res);
    }
    check_selectop(sop);

    return (0);
}
/*
 * eval_mm_util - Evaluate the space utilization of the student's package
 *   The idea is to remember the high water mark "hwm" of the heap for
 *   an optimal allocator, i.e., no gaps and no internal fragmentation.
 *   Utilization is the ratio hwm/heapsize, where heapsize is the
 *   size of the heap in bytes after running the student's malloc
 *   package on the trace. Note that our implementation of mem_sbrk()
 *   doesn't allow the students to decrement the brk pointer, so brk
 *   is always the high water mark of the heap.
 */
static double eval_mm_util(trace_t *trace, int tracenum, range_t **ranges)
{
    int i;
    int index;
    int size, newsize, oldsize;
    int max_total_size = 0;
    int total_size = 0;
    char *p;
    char *newp, *oldp;

    /* initialize the heap and the mm malloc package */
    mem_reset_brk();
    if (mm_init() < 0)
        app_error("mm_init failed in eval_mm_util");

    for (i = 0; i < trace->num_ops; i++) {
        switch (trace->ops[i].type) {

        case ALLOC: /* mm_alloc */
            index = trace->ops[i].index;
            size = trace->ops[i].size;

            if ((p = mm_malloc(size)) == NULL)
                app_error("mm_malloc failed in eval_mm_util");

            /* Remember region and size */
            trace->blocks[index] = p;
            trace->block_sizes[index] = size;

            /* Keep track of current total size
             * of all allocated blocks */
            total_size += size;

            /* Update statistics */
            max_total_size = (total_size > max_total_size) ?
                total_size : max_total_size;
            break;

        case REALLOC: /* mm_realloc */
            index = trace->ops[i].index;
            newsize = trace->ops[i].size;
            oldsize = trace->block_sizes[index];

            oldp = trace->blocks[index];
            if ((newp = mm_realloc(oldp, newsize)) == NULL)
                app_error("mm_realloc failed in eval_mm_util");

            /* Remember region and size */
            trace->blocks[index] = newp;
            trace->block_sizes[index] = newsize;

            /* Keep track of current total size
             * of all allocated blocks */
            total_size += (newsize - oldsize);

            /* Update statistics */
            max_total_size = (total_size > max_total_size) ?
                total_size : max_total_size;
            break;

        case FREE: /* mm_free */
            index = trace->ops[i].index;
            size = trace->block_sizes[index];
            p = trace->blocks[index];

            mm_free(p);

            /* Keep track of current total size
             * of all allocated blocks */
            total_size -= size;
            break;

        default:
            app_error("Nonexistent request type in eval_mm_util");
        }
    }

    return ((double)max_total_size / (double)mem_heapsize());
}
struct usched_client_request *parse_client_instruction(const char *cmd) {
    int counter = 0;
    char *ptr = NULL, *saveptr = NULL, *qarg = NULL, *cmd_s = NULL;
    char **args = NULL;
    struct usched_client_request *req = NULL;

    /* Duplicate the cmd string to allow safe const and take advantage of
     * libfsma by avoiding strdup() */
    if (!(cmd_s = mm_alloc(strlen(cmd) + 1)))
        return NULL;

    strcpy(cmd_s, cmd);

    /* Split by space, tab and newline */
    for (counter = 0, ptr = cmd_s; (ptr = strtok_r(ptr, " \t\n", &saveptr)); counter ++, ptr = NULL, qarg = NULL) {
        if (!(args = mm_realloc(args, sizeof(char **) * (counter + 2))))
            goto _finish;

        args[counter] = args[counter + 1] = NULL;

        if ((ptr[0] == '\'') || (ptr[0] == '\"')) {
            size_t len = strlen(ptr);
            char qchr = ptr[0];
            int done = (len > 1) && (ptr[len - 1] == qchr) && (ptr[len - 2] != '\\');

            if (!(qarg = mm_alloc(len - done)))
                goto _finish;

            memcpy(memset(qarg, 0, len - done), ptr + 1, strlen(ptr) - 1 - done);

            while (!done && (ptr = strtok_r(NULL, " \t\n", &saveptr))) {
                len = strlen(ptr);
                done = (ptr[len - 1] == qchr) && ((len > 1) ? (ptr[len - 2] != '\\') : 1);

                if (!(qarg = mm_realloc(qarg, strlen(qarg) + 1 + len + !done)))
                    goto _finish;

                if (qarg[0])
                    strcat(qarg, " ");

                strncat(qarg, ptr, len - done);
            }

            if (!done)
                goto _finish;

            args[counter] = qarg;
        } else {
            if (!(args[counter] = mm_alloc(strlen(ptr) + 1)))
                goto _finish;

            strcpy(args[counter], ptr);
        }
    }

    req = parse_client_instruction_array(counter, args);

_finish:
    if (cmd_s)
        mm_free(cmd_s);

    if (qarg)
        mm_free(qarg);

    if (args) {
        for (counter = 0; args[counter]; counter ++)
            mm_free(args[counter]);

        mm_free(args);
    }

    return req;
}
int
win32_dispatch(struct event_base *base, struct timeval *tv)
{
    struct win32op *win32op = base->evbase;
    int res = 0;
    unsigned j, i;
    int fd_count;
    SOCKET s;

    if (win32op->resize_out_sets) {
        size_t size = FD_SET_ALLOC_SIZE(win32op->num_fds_in_fd_sets);
        if (!(win32op->readset_out = mm_realloc(win32op->readset_out, size)))
            return (-1);
        if (!(win32op->exset_out = mm_realloc(win32op->exset_out, size)))
            return (-1);
        if (!(win32op->writeset_out = mm_realloc(win32op->writeset_out, size)))
            return (-1);
        win32op->resize_out_sets = 0;
    }

    fd_set_copy(win32op->readset_out, win32op->readset_in);
    fd_set_copy(win32op->exset_out, win32op->writeset_in);
    fd_set_copy(win32op->writeset_out, win32op->writeset_in);

    fd_count =
        (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
        win32op->readset_out->fd_count : win32op->writeset_out->fd_count;

    if (!fd_count) {
        long msec = evutil_tv_to_msec_(tv);
        /* Sleep's DWORD argument is unsigned long */
        if (msec < 0)
            msec = LONG_MAX;
        /* Windows doesn't like you to call select() with no sockets */
        Sleep(msec);
        return (0);
    }

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    res = select(fd_count,
        (struct fd_set*)win32op->readset_out,
        (struct fd_set*)win32op->writeset_out,
        (struct fd_set*)win32op->exset_out, tv);
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    event_debug(("%s: select returned %d", __func__, res));

    if (res <= 0) {
        return res;
    }

    if (win32op->readset_out->fd_count) {
        i = evutil_weakrand_range_(&base->weakrand_seed,
            win32op->readset_out->fd_count);
        for (j=0; j<win32op->readset_out->fd_count; ++j) {
            if (++i >= win32op->readset_out->fd_count)
                i = 0;
            s = win32op->readset_out->fd_array[i];
            evmap_io_active_(base, s, EV_READ);
        }
    }
    if (win32op->exset_out->fd_count) {
        i = evutil_weakrand_range_(&base->weakrand_seed,
            win32op->exset_out->fd_count);
        for (j=0; j<win32op->exset_out->fd_count; ++j) {
            if (++i >= win32op->exset_out->fd_count)
                i = 0;
            s = win32op->exset_out->fd_array[i];
            evmap_io_active_(base, s, EV_WRITE);
        }
    }
    if (win32op->writeset_out->fd_count) {
        SOCKET s;
        i = evutil_weakrand_range_(&base->weakrand_seed,
            win32op->writeset_out->fd_count);
        for (j=0; j<win32op->writeset_out->fd_count; ++j) {
            if (++i >= win32op->writeset_out->fd_count)
                i = 0;
            s = win32op->writeset_out->fd_array[i];
            evmap_io_active_(base, s, EV_WRITE);
        }
    }
    return (0);
}
FAR void *kmm_realloc(FAR void *oldmem, size_t newsize)
{
  return mm_realloc(&g_kmmheap, oldmem, newsize);
}