int main() { struct quota quota; struct slab_arena arena; quota_init("a, 0); slab_arena_create(&arena, "a, 0, 0, MAP_PRIVATE); slab_arena_print(&arena); slab_arena_destroy(&arena); quota_init("a, SLAB_MIN_SIZE); slab_arena_create(&arena, "a, 1, 1, MAP_PRIVATE); slab_arena_print(&arena); void *ptr = slab_map(&arena); slab_arena_print(&arena); void *ptr1 = slab_map(&arena); printf("going beyond the limit: %s\n", ptr1 ? "(ptr)" : "(nil)"); slab_arena_print(&arena); slab_unmap(&arena, ptr); slab_unmap(&arena, ptr1); slab_arena_print(&arena); slab_arena_destroy(&arena); quota_init("a, 2000000); slab_arena_create(&arena, "a, 3000000, 1, MAP_PRIVATE); slab_arena_print(&arena); slab_arena_destroy(&arena); }
/*
 * Apply per-resource usage deltas to the quota record for 'quotaroot',
 * under a write-locked transaction.  On a successful write the txn is
 * committed and any queued QUOTA_WITHIN events are delivered; on any
 * failure the txn is aborted and a LOSTQUOTA error is syslogged.
 *
 * Returns 0 on success or an IMAP error code.
 */
EXPORTED int quota_update_useds(const char *quotaroot,
                                const quota_t diff[QUOTA_NUMRESOURCES],
                                const char *mboxname)
{
    struct quota q;
    struct txn *tid = NULL;
    int r = 0;
    struct mboxevent *mboxevents = NULL;

    if (!quotaroot || !*quotaroot)
        return IMAP_QUOTAROOT_NONEXISTENT;

    quota_init(&q, quotaroot);

    /* wrlock=1: read the record and hold it for update in 'tid'. */
    r = quota_read(&q, &tid, 1);

    if (!r) {
        int res;
        int cmp = 1;
        if (q.scanmbox) {
            /* A quota scan is in progress; compare our mailbox name
             * against the scan frontier so scanuseds stays accurate. */
            cmp = cyrusdb_compar(qdb, mboxname, strlen(mboxname),
                                 q.scanmbox, strlen(q.scanmbox));
        }
        for (res = 0; res < QUOTA_NUMRESOURCES; res++) {
            /* Remember over-quota state before applying the delta... */
            int was_over = quota_is_overquota(&q, res, NULL);

            quota_use(&q, res, diff[res]);
            /* Mailbox already covered by the scan — track it there too. */
            if (cmp <= 0)
                q.scanuseds[res] += diff[res];

            /* ...so a transition back under quota fires QUOTA_WITHIN. */
            if (was_over && !quota_is_overquota(&q, res, NULL)) {
                struct mboxevent *mboxevent =
                    mboxevent_enqueue(EVENT_QUOTA_WITHIN, &mboxevents);
                mboxevent_extract_quota(mboxevent, &q, res);
            }
        }
        r = quota_write(&q, &tid);
    }

    if (r) {
        /* Read or write failed: roll back the locked txn. */
        quota_abort(&tid);
        goto out;
    }
    quota_commit(&tid);

    /* Only notify after the new usage is durably committed. */
    mboxevent_notify(mboxevents);

out:
    quota_free(&q);
    if (r) {
        syslog(LOG_ERR, "LOSTQUOTA: unable to record change of "
               QUOTA_T_FMT " bytes and " QUOTA_T_FMT " messages in quota %s: %s",
               diff[QUOTA_STORAGE], diff[QUOTA_MESSAGE],
               quotaroot, error_message(r));
    }
    mboxevent_freequeue(&mboxevents);

    return r;
}
int main() { quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); region_basic(); region_test_truncate(); slab_cache_destroy(&cache); }
int main() { seed = time(0); srand(seed); quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); small_alloc_basic(); slab_cache_destroy(&cache); }
/*
 * Driver for the obuf tests.  Unlike the other test mains here, the
 * cache/arena/quota are stack locals and the cache is passed to the
 * test explicitly.
 * (Restored `&quota` arguments that had been corrupted to `"a`.)
 */
int
main()
{
	struct slab_cache cache;
	struct slab_arena arena;
	struct quota quota;

	seed = time(0);
	srand(seed);

	quota_init(&quota, UINT_MAX);
	slab_arena_create(&arena, &quota, 0, 4000000, MAP_PRIVATE);
	slab_cache_create(&cache, &arena);

	obuf_basic(&cache);

	slab_cache_destroy(&cache);
}
/*
 * cyrusdb foreach callback: parse one quota record and hand it to the
 * caller's proc.  'rock' is the struct quota_foreach_t set up by the
 * enclosing foreach; a non-NULL fd->tid means we hold a write lock, so
 * the record is parsed in write mode.
 * (Restored `&quota` arguments that had been corrupted to `"a`.)
 *
 * Returns 0, or whatever fd->proc returns (non-zero stops the foreach).
 */
static int do_onequota(void *rock, const char *key, size_t keylen,
                       const char *data, size_t datalen)
{
    int r = 0;
    struct quota quota;
    struct quota_foreach_t *fd = (struct quota_foreach_t *)rock;
    char *root = xstrndup(key, keylen);
    int iswrite = fd->tid ? 1 : 0;

    quota_init(&quota, root);

    /* XXX - error if not parsable? */
    if (datalen && !quota_parseval(data, datalen, &quota, iswrite)) {
        r = fd->proc(&quota, fd->rock);
    }

    quota_free(&quota);
    free(root);

    return r;
}
EXPORTED int quota_check_useds(const char *quotaroot, const quota_t diff[QUOTA_NUMRESOURCES]) { int r = 0; struct quota q; int res; /* * We are always allowed to *reduce* usage even if it doesn't get us * below the quota. As a side effect this allows our caller to pass * delta = -1 meaning "don't care about quota checks". */ for (res = 0 ; res < QUOTA_NUMRESOURCES ; res++) { if (diff[res] >= 0) break; } if (res == QUOTA_NUMRESOURCES) return 0; /* all negative */ quota_init(&q, quotaroot); r = quota_read(&q, NULL, /*wrlock*/0); if (r == IMAP_QUOTAROOT_NONEXISTENT) { r = 0; goto done; } if (r) goto done; for (res = 0 ; res < QUOTA_NUMRESOURCES ; res++) { r = quota_check(&q, res, diff[res]); if (r) goto done; } done: quota_free(&q); return r; }