/*
 * don't do any of "storage compaction" nonsense, "just" the three modes:
 *   + cp == NULL  ==> malloc
 *   + nbytes == 0 ==> free
 *   + else        ==> realloc
 */
void *
bmk_memrealloc(void *cp, size_t nbytes)
{
	union overhead *op;
	size_t size;
	size_t alignpad;
	void *np;

	if (cp == NULL)
		return bmk_memalloc(nbytes, 8);

	if (nbytes == 0) {
		bmk_memfree(cp);
		return NULL;
	}

	op = ((union overhead *)cp)-1;
	size = op->ov_index;
	alignpad = op->ov_alignpad;

	/* don't bother "compacting".  don't like it?  don't use realloc! */
	if (((1<<(size+MINSHIFT)) - (alignpad+sizeof(*op))) >= nbytes)
		return cp;

	/* we're gonna need a bigger bucket */
	np = bmk_memalloc(nbytes, 8);
	if (np == NULL)
		return NULL;

	memcpy(np, cp, (1<<(size+MINSHIFT)) - (alignpad+sizeof(*op)));
	bmk_memfree(cp);

	return np;
}
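/*
 * Hypothetical usage sketch (not part of the allocator source): exercises
 * the three bmk_memrealloc() modes documented above.  The caller name and
 * buffer sizes are made up for illustration.
 */
static void
realloc_modes_example(void)
{
	void *p;

	p = bmk_memrealloc(NULL, 32);	/* cp == NULL   ==> allocates like malloc */
	p = bmk_memrealloc(p, 4096);	/* larger size  ==> may move to a bigger bucket */
	p = bmk_memrealloc(p, 0);	/* nbytes == 0  ==> frees, returns NULL */
	(void)p;
}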
void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	bmk_assert(cv->nwaiters == 0);
	bmk_memfree(cv, BMK_MEMWHO_WIREDBMK);
}
void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	bmk_assert(TAILQ_EMPTY(&mtx->waiters) && mtx->o == NULL);
	bmk_memfree(mtx, BMK_MEMWHO_WIREDBMK);
}
struct consfront_dev *
xencons_ring_init(void)
{
	int err;
	struct consfront_dev *dev;

	if (!start_info.console.domU.evtchn)
		return 0;

	dev = bmk_memcalloc(1, sizeof(struct consfront_dev));
	dev->nodename = "device/console";
	dev->dom = 0;
	dev->backend = 0;
	dev->ring_ref = 0;

	dev->evtchn = start_info.console.domU.evtchn;
	dev->ring = (struct xencons_interface *)
	    mfn_to_virt(start_info.console.domU.mfn);

	err = minios_bind_evtchn(dev->evtchn, console_handle_input, dev);
	if (err <= 0) {
		minios_printk("XEN console request chn bind failed %i\n", err);
		bmk_memfree(dev);
		return NULL;
	}
	minios_unmask_evtchn(dev->evtchn);

	/* In case we have in-flight data after save/restore... */
	notify_daemon(dev);

	return dev;
}
void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	assert(cv->nwaiters == 0);
	bmk_memfree(cv);
}
void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	assert(TAILQ_EMPTY(&mtx->waiters) && mtx->o == NULL);
	bmk_memfree(mtx);
}
int
main()
{
	void **rings;				/* yay! */
	void **ring_alloc, **ring_free;		/* yay! */
	int i, n;

	srandom(time(NULL));

	rings = malloc(NALLOC * NRING * sizeof(void *));
	/* so we can free() immediately without stress */
	memset(rings, 0, NALLOC * NRING * sizeof(void *));

	for (n = 0;; n = (n+1) % NRING) {
		if (n == 0)
			mstats("");

		ring_alloc = &rings[n * NALLOC];
		ring_free = &rings[((n + NRING/2) % NRING) * NALLOC];

		for (i = 0; i < NALLOC; i++) {
			ring_alloc[i] = testalloc();
			bmk_memfree(ring_free[i]);
		}
	}
}
void
xenbus_wait_for_watch(struct xenbus_event_queue *queue)
{
	char **ret;

	if (!queue)
		queue = &xenbus_default_watch_queue;
	ret = xenbus_wait_for_watch_return(queue);
	if (ret)
		bmk_memfree(ret, BMK_MEMWHO_WIREDBMK);
	else
		minios_printk("unexpected path returned by watch\n");
}
char *
xenbus_wait_for_value(const char *path, const char *value,
	struct xenbus_event_queue *queue)
{

	if (!queue)
		queue = &xenbus_default_watch_queue;

	for (;;) {
		char *res, *msg;
		int r;

		msg = xenbus_read(XBT_NIL, path, &res);
		if (msg)
			return msg;

		r = bmk_strcmp(value, res);
		bmk_memfree(res, BMK_MEMWHO_WIREDBMK);
		if (r == 0)
			break;
		else
			xenbus_wait_for_watch(queue);
	}
	return NULL;
}
char *
xenbus_switch_state(xenbus_transaction_t xbt, const char *path, XenbusState state)
{
	char *current_state;
	char *msg = NULL;
	char *msg2 = NULL;
	char value[2];
	XenbusState rs;
	int xbt_flag = 0;
	int retry = 0;

	do {
		if (xbt == XBT_NIL) {
			msg = xenbus_transaction_start(&xbt);
			if (msg)
				goto exit;
			xbt_flag = 1;
		}

		msg = xenbus_read(xbt, path, &current_state);
		if (msg)
			goto exit;

		rs = (XenbusState) (current_state[0] - '0');
		bmk_memfree(current_state, BMK_MEMWHO_WIREDBMK);
		if (rs == state) {
			msg = NULL;
			goto exit;
		}

		bmk_snprintf(value, 2, "%d", state);
		msg = xenbus_write(xbt, path, value);

exit:
		if (xbt_flag) {
			msg2 = xenbus_transaction_end(xbt, 0, &retry);
			xbt = XBT_NIL;
		}
		if (msg == NULL && msg2 != NULL)
			msg = msg2;
	} while (retry);

	return msg;
}
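/*
 * Hypothetical usage sketch (not part of the xenbus source): flip a
 * frontend state node to Connected, letting xenbus_switch_state() open
 * its own transaction via XBT_NIL.  The path is illustrative only, and
 * XenbusStateConnected is assumed to come from the standard Xen headers.
 * The returned xenbus error string (or NULL) is passed back to the caller.
 */
static char *
announce_connected_example(void)
{

	return xenbus_switch_state(XBT_NIL, "device/vif/0/state",
	    XenbusStateConnected);
}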
char *
xenbus_wait_for_state_change(const char *path, XenbusState *state,
	struct xenbus_event_queue *queue)
{

	if (!queue)
		queue = &xenbus_default_watch_queue;

	for (;;) {
		char *res, *msg;
		XenbusState rs;

		msg = xenbus_read(XBT_NIL, path, &res);
		if (msg)
			return msg;

		rs = (XenbusState)(res[0] - 48);
		bmk_memfree(res, BMK_MEMWHO_WIREDBMK);

		if (rs == *state)
			xenbus_wait_for_watch(queue);
		else {
			*state = rs;
			break;
		}
	}
	return NULL;
}
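/*
 * Hypothetical usage sketch (not part of the xenbus source): spin on a
 * state node until the other end reports Connected.  The path is assumed
 * to already have a watch registered on it, NULL selects the default
 * watch queue as in the functions above, and the XenbusState constants
 * are assumed to come from the standard Xen headers.
 */
static char *
wait_until_connected_example(const char *path)
{
	XenbusState state = XenbusStateInitialising;
	char *err = NULL;

	while (err == NULL && state != XenbusStateConnected)
		err = xenbus_wait_for_state_change(path, &state, NULL);

	return err;	/* NULL on success, xenbus error string otherwise */
}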
void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	bmk_memfree(rw, BMK_MEMWHO_WIREDBMK);
}
void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	bmk_memfree(rw);
}
void
free(void *cp)
{

	bmk_memfree(cp);
}
static void
xenbus_thread_func(void *ign)
{
	struct xsd_sockmsg msg;
	unsigned prod = xenstore_buf->rsp_prod;

	for (;;) {
		minios_wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
		while (1) {
			prod = xenstore_buf->rsp_prod;
			DEBUG("Rsp_cons %d, rsp_prod %d.\n",
			    xenstore_buf->rsp_cons, xenstore_buf->rsp_prod);
			if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons
			    < sizeof(msg)) {
				minios_notify_remote_via_evtchn(start_info.store_evtchn);
				break;
			}
			rmb();
			memcpy_from_ring(xenstore_buf->rsp, &msg,
			    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons), sizeof(msg));
			DEBUG("Msg len %d, %d avail, id %d.\n",
			    msg.len + sizeof(msg),
			    xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
			    msg.req_id);
			if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons
			    < sizeof(msg) + msg.len) {
				minios_notify_remote_via_evtchn(start_info.store_evtchn);
				break;
			}

			DEBUG("Message is good.\n");

			if (msg.type == XS_WATCH_EVENT) {
				struct xenbus_event *event =
				    bmk_xmalloc_bmk(sizeof(*event) + msg.len);
				struct xenbus_event_queue *events = NULL;
				char *data = (char*)event + sizeof(*event);
				struct xenbus_watch *watch;

				memcpy_from_ring(xenstore_buf->rsp, data,
				    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons + sizeof(msg)),
				    msg.len);

				event->path = data;
				event->token = event->path + bmk_strlen(event->path) + 1;

				mb();
				xenstore_buf->rsp_cons += msg.len + sizeof(msg);

				spin_lock(&xenbus_req_lock);

				MINIOS_LIST_FOREACH(watch, &watches, entry)
					if (!bmk_strcmp(watch->token, event->token)) {
						event->watch = watch;
						events = watch->events;
						break;
					}

				if (events) {
					queue_event(events, event);
				} else {
					minios_printk("unexpected watch token %s\n",
					    event->token);
					bmk_memfree(event, BMK_MEMWHO_WIREDBMK);
				}

				spin_unlock(&xenbus_req_lock);
			} else {
				req_info[msg.req_id].for_queue->reply =
				    bmk_xmalloc_bmk(sizeof(msg) + msg.len);
				memcpy_from_ring(xenstore_buf->rsp,
				    req_info[msg.req_id].for_queue->reply,
				    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
				    msg.len + sizeof(msg));
				mb();
				xenstore_buf->rsp_cons += msg.len + sizeof(msg);
				spin_lock(&xenbus_req_lock);
				queue_event(req_info[msg.req_id].reply_queue,
				    req_info[msg.req_id].for_queue);
				spin_unlock(&xenbus_req_lock);
			}

			wmb();
			minios_notify_remote_via_evtchn(start_info.store_evtchn);
		}
	}
}