/**
 * Prints the total recorded size of all buckets in the bucket log.
 *
 * The bucket log header (BucketLog) overlays entry 0 of the mapped
 * BMEntry array; bucket entries proper start at index 1.
 */
int main() {
    int fd = open(DATA_DIR "blog", O_RDWR);
    if (fd < 0) {
        perror("open()");
        return -1;
    }
    BMEntry *ben = MMAP_FD(fd, MAX_ENTRIES(sizeof(BMEntry)));
    /* NOTE(review): if MMAP_FD returns raw mmap(), failure is MAP_FAILED,
     * not NULL — confirm the macro normalizes this. */
    if (ben == NULL) {
        perror("mmap()");
        return -1;
    }
    BucketLog *blog = (BucketLog *) ben;
    close(fd);                  /* mapping remains valid after close */

    long long total = 0;
    int i;
    for (i = 1; i <= blog->bucketID; i++) {
        total += ben[i].size;
    }
    printf("%lld\n", total);

    /* FIX: release the mapping before exit (consistent with the sibling
     * tools, which munmap their logs). */
    munmap(ben, MAX_ENTRIES(sizeof(BMEntry)));
    return 0;
}
/**
 * Implements BucketService->start()
 *
 * Stores the input/output queues, opens (creating if necessary) the
 * on-disk bucket log, sizes it, maps it into memory, and launches the
 * background processing thread.
 *
 * Returns 0 on success; -1 on open failure; otherwise the
 * pthread_create() result.
 */
static int start(Queue *iq, Queue *oq) {
    int ret, fd;

    service._iq = iq;
    service._oq = oq;

    /* Load Bucket Log */
    fd = open(DATA_DIR "blog", O_RDWR | O_CREAT, 0644);
    if (fd < 0) {
        perror("open()");
        return -1;
    }
    /* BUG FIX: ftruncate() was previously called INSIDE assert(), so the
     * call vanished entirely when compiled with NDEBUG. Perform the call
     * unconditionally and assert on its result instead. */
    ret = ftruncate(fd, MAX_ENTRIES(sizeof(BMEntry)));
    assert(!ret);

    service._en = MMAP_FD(fd, MAX_ENTRIES(sizeof(BMEntry)));
    /* The log header overlays the start of the entry array. */
    service._log = (BucketLog *) service._en;
    close(fd);                  /* mapping survives the close */

    memset(service._padding, 0, BLOCK_SIZE);

    ret = pthread_create(&service._tid, NULL, process, NULL);
    return ret;
}
int main(int argc, char * argv[]) { if (argc != 3) { fprintf(stderr, "Usage: %s images version\n", argv[0]); return 0; } char buf[128]; uint32_t insts = atoi(argv[1]); uint32_t version = atoi(argv[2]); int fd; uint64_t i; fd = open(DATA_DIR "blog", O_RDWR); BMEntry * ben = MMAP_FD(fd, MAX_ENTRIES(sizeof(BMEntry))); BucketLog * blog = (BucketLog *) ben; close(fd); fd = open(DATA_DIR "ilog", O_RDWR); ssize_t isize = lseek(fd, 0, SEEK_END); IMEntry * ien = MMAP_FD(fd, isize); insts = isize / sizeof(IMEntry); close(fd); /// Remove direct and indirect recipe for (i = 0; i < insts; i++) { sprintf(buf, DATA_DIR "image/%lu-%u", i, version); unlink(buf); sprintf(buf, DATA_DIR "image/i%lu-%u", i, version); unlink(buf); ien[i].old--; ien[i].deleted++; } /// Remove buckets tagged with specified version for (i = 1; i <= blog->bucketID; i++) { if (ben[i].ver == version) { sprintf(buf, DATA_DIR "bucket/%08lx", i); unlink(buf); ben[i].size = 0; ben[i].psize = 0; } } sync(); munmap(ben, MAX_ENTRIES(sizeof(BMEntry))); munmap(ien, INST_MAX(sizeof(IMEntry))); return 0; }
int main(int argc, char * argv[]) { if (argc != 3) { fprintf(stderr, "Usage : %s insts version\n", argv[0]); return 0; } int fd, i; fd = open(DATA_DIR "ilog", O_RDWR); IMEntry * ien = MMAP_FD(fd, INST_MAX(sizeof(IMEntry))); close(fd); fd = open(DATA_DIR "slog", O_RDWR); SMEntry * sen = MMAP_FD(fd, MAX_ENTRIES(sizeof(SMEntry))); close(fd); fd = open(DATA_DIR "clog", O_RDWR); CMEntry * cen = MMAP_FD(fd, MAX_ENTRIES(sizeof(CMEntry))); close(fd); fd = open(DATA_DIR "blog", O_RDWR); BMEntry * ben = MMAP_FD(fd, MAX_ENTRIES(sizeof(BMEntry))); close(fd); uint32_t ins = atoi(argv[1]); int32_t ver = atoi(argv[2]); if (ver >= 0) { struct timeval x; TIMERSTART(x); /// Accounting for number of recent and early image for (i = 0; i < ins; i++) { ien[i].recent--; ien[i].old++; } /// Run services below RevRefService * rrs = GetRevRefService(); rrs->start(sen, ins, ver); rrs->stop(); RevMapService * rms = GetRevMapService(); rms->start(sen, cen, ins, ver); rms->stop(); RevRbdService * rbs = GetRevRbdService(); rbs->start(sen, cen, ben, ver); rbs->stop(); sync(); TIMERSTOP(x); printf("%ld.%06ld\n", x.tv_sec, x.tv_usec); } else { printf("%ld.%06ld\n", 0L, 0L); } munmap(ien, INST_MAX(sizeof(IMEntry))); munmap(sen, MAX_ENTRIES(sizeof(SMEntry))); munmap(cen, MAX_ENTRIES(sizeof(CMEntry))); munmap(ben, MAX_ENTRIES(sizeof(BMEntry))); return 0; }
int main(int argc, char * argv[]) { if (argc != 3) { fprintf(stderr, "Usage : %s <instanceID> <version number>\n", argv[0]); return 0; } char buf[128]; uint32_t ins = atoi(argv[1]); uint32_t ver = atoi(argv[2]); uint32_t fd, ifd; fd = open(DATA_DIR "slog", O_RDWR); sen = MMAP_FD(fd, MAX_ENTRIES(sizeof(SMEntry))); close(fd); fd = open(DATA_DIR "blog", O_RDWR); ben = MMAP_FD(fd, MAX_ENTRIES(sizeof(BMEntry))); close(fd); sprintf(buf, DATA_DIR "image/%u-%u", ins, ver); ifd = open(buf, O_RDONLY); struct timeval x; TIMERSTART(x); Direct dir; while (read(ifd, &dir, sizeof(dir)) > 0) { /// Decrements reference count of each direct reference if (--sen[dir.id].ref <= 0) { ben[sen[dir.id].bucket].psize += sen[dir.id].len; //printf("Seg: %llu, size: %u, bucket: %llu\n",dir.id,sen[dir.id].len,sen[dir.id].bucket); } } close(ifd); unlink(buf); TIMERSTOP(x); printf("%ld.%06ld\n", x.tv_sec, x.tv_usec); munmap(ben, MAX_ENTRIES(sizeof(BMEntry))); munmap(sen, MAX_ENTRIES(sizeof(SMEntry))); return 0; }
int main(int argc, char * argv[]){ int i; int fd = open(DATA_DIR "slog", O_RDWR); SMEntry * sen = MMAP_FD(fd, MAX_ENTRIES(sizeof(SMEntry))); close(fd); fd = open(DATA_DIR "blog", O_RDWR); BMEntry * ben = MMAP_FD(fd, MAX_ENTRIES(sizeof(BMEntry))); close(fd); SegmentLog * slog = (SegmentLog*)sen; //fprintf(stderr,"Number of segments: %d\n",slog->segID); for(i = 1;i<=slog->segID;i++) if (sen[i].pos > ben[sen[i].bucket].size) printf("Bucket %08lx(%d), segment %d(%d)\n",sen[i].bucket,ben[sen[i].bucket].size,i,sen[i].pos); munmap(sen,MAX_ENTRIES(sizeof(SMEntry))); return 0; }
/**
 * Implements BucketService->stop()
 *
 * Joins the worker thread started by start() and releases the mapped
 * bucket log.
 *
 * Returns the pthread_join() result.
 */
static int stop() {
    int ret;    /* FIX: removed unused local 'i' */

    ret = pthread_join(service._tid, NULL);
    munmap(service._en, MAX_ENTRIES(sizeof(BMEntry)));
    return ret;
}
/*
 * qmgr_job_preempt - possibly preempt the current job with a better
 * candidate; returns the job that should become current (either the
 * candidate, or the unchanged current job if preemption is not possible
 * or not worthwhile).
 */
static QMGR_JOB *qmgr_job_preempt(QMGR_JOB *current)
{
    const char *myname = "qmgr_job_preempt";
    QMGR_TRANSPORT *transport = current->transport;
    QMGR_JOB *job, *prev;
    int     expected_slots;
    int     rcpt_slots;

    /*
     * Suppress preempting completely if the current job is not big enough to
     * accumulate even the minimal number of slots required.
     *
     * Also, don't look for better job candidate if there are no available
     * slots yet (the count can get negative due to the slot loans below).
     */
    if (current->slots_available <= 0
        || MAX_ENTRIES(current) < transport->min_slots * transport->slot_cost)
        return (current);

    /*
     * Find best candidate for preempting the current job.
     *
     * Note that the function also takes care that the candidate fits within
     * the number of delivery slots which the current job is still able to
     * accumulate.
     */
    if ((job = qmgr_job_candidate(current)) == 0)
        return (current);

    /*
     * Sanity checks.
     */
    if (job == current)
        msg_panic("%s: attempt to preempt itself", myname);
    if (job->stack_children.next != 0)
        msg_panic("%s: already on the job stack (%d)", myname, job->stack_level);
    if (job->stack_level < 0)
        msg_panic("%s: not on the job list (%d)", myname, job->stack_level);

    /*
     * Check if there is enough available delivery slots accumulated to
     * preempt the current job.
     *
     * The slot loaning scheme improves the average message response time.
     * Note that the loan only allows the preemption happen earlier, though.
     * It doesn't affect how many slots have to be "paid" - in either case
     * the full number of slots required has to be accumulated later before
     * the current job can be preempted again.
     */
    expected_slots = MAX_ENTRIES(job) - job->selected_entries;
    if (current->slots_available / transport->slot_cost + transport->slot_loan
        < expected_slots * transport->slot_loan_factor / 100.0)
        return (current);

    /*
     * Preempt the current job.
     *
     * This involves placing the selected candidate in front of the current
     * job on the job list and updating the stack parent/child/sibling
     * pointers appropriately. But first we need to make sure that the
     * candidate is taken from its previous job stack which it might be top
     * of.
     */
    if (job->stack_level > 0)
        qmgr_job_pop(job);
    QMGR_LIST_UNLINK(transport->job_list, QMGR_JOB *, job, transport_peers);
    prev = current->transport_peers.prev;
    QMGR_LIST_LINK(transport->job_list, prev, job, current, transport_peers);
    job->stack_parent = current;
    QMGR_LIST_APPEND(current->stack_children, job, stack_siblings);
    job->stack_level = current->stack_level + 1;

    /*
     * Update the current job pointer and explicitly reset the candidate
     * cache.
     */
    transport->job_current = job;
    RESET_CANDIDATE_CACHE(transport);

    /*
     * Since the single job can be preempted by several jobs at the same
     * time, we have to adjust the available slot count now to prevent using
     * the same slots multiple times. To do that we subtract the number of
     * slots the preempting job will supposedly use. This number will be
     * corrected later when that job is popped from the stack to reflect the
     * number of slots really used.
     *
     * As long as we don't need to keep track of how many slots were really
     * used, we can (ab)use the slots_used counter for counting the
     * difference between the real and expected amounts instead of the
     * absolute amount.
     */
    current->slots_available -= expected_slots * transport->slot_cost;
    job->slots_used = -expected_slots;

    /*
     * Add part of extra recipient slots reserved for preempting jobs to the
     * new current job if necessary.
     *
     * Note that transport->rcpt_unused is within <-rcpt_per_stack,0> in such
     * case.
     */
    if (job->message->rcpt_offset != 0) {
        rcpt_slots = (transport->rcpt_per_stack + transport->rcpt_unused + 1) / 2;
        job->rcpt_limit += rcpt_slots;
        job->message->rcpt_limit += rcpt_slots;
        transport->rcpt_unused -= rcpt_slots;
    }
    if (msg_verbose)
        msg_info("%s: %s by %s, level %d", myname,
                 current->message->queue_id, job->message->queue_id,
                 job->stack_level);

    return (job);
}
/*
 * qmgr_job_candidate - select the best job for preempting the given one,
 * or return 0 if no suitable candidate exists. Results (including
 * negative ones) are cached per transport.
 */
static QMGR_JOB *qmgr_job_candidate(QMGR_JOB *current)
{
    QMGR_TRANSPORT *transport = current->transport;
    QMGR_JOB *job, *best_job = 0;
    double  score, best_score = 0.0;
    int     max_slots, max_needed_entries, max_total_entries;
    int     delay;
    time_t  now = sane_time();

    /*
     * Fetch the result directly from the cache if the cache is still valid.
     *
     * Note that we cache negative results too, so the cache must be
     * invalidated by resetting the cached current job pointer, not the
     * candidate pointer itself.
     *
     * In case the cache is valid and contains no candidate, we can ignore
     * the time change, as it affects only which candidate is the best, not
     * if one exists. However, this feature requires that we no longer relax
     * the cache resetting rules, depending on the automatic cache timeout.
     */
    if (transport->candidate_cache_current == current
        && (transport->candidate_cache_time == now
            || transport->candidate_cache == 0))
        return (transport->candidate_cache);

    /*
     * Estimate the minimum amount of delivery slots that can ever be
     * accumulated for the given job. All jobs that won't fit into these
     * slots are excluded from the candidate selection.
     */
    max_slots = (MIN_ENTRIES(current) - current->selected_entries
                 + current->slots_available) / transport->slot_cost;

    /*
     * Select the candidate with best time_since_queued/total_recipients
     * score. In addition to jobs which don't meet the max_slots limit, skip
     * also jobs which don't have any selectable entries at the moment.
     *
     * Instead of traversing the whole job list we traverse it just from the
     * current job forward. This has several advantages. First, we skip some
     * of the blocker jobs and the current job itself right away. But the
     * really important advantage is that we are sure that we don't consider
     * any jobs that are already stack children of the current job. Thanks
     * to this we can easily include all encountered jobs which are leaf
     * children of some of the preempting stacks as valid candidates. All we
     * need to do is to make sure we do not include any of the stack
     * parents. And, because the leaf children are not ordered by the time
     * since queued, we have to exclude them from the early loop end test.
     *
     * However, don't bother searching if we can't find anything suitable
     * anyway.
     */
    if (max_slots > 0) {
        for (job = current->transport_peers.next; job;
             job = job->transport_peers.next) {
            /* Skip stack parents and blocker jobs. */
            if (job->stack_children.next != 0 || IS_BLOCKER(job, transport))
                continue;
            max_total_entries = MAX_ENTRIES(job);
            max_needed_entries = max_total_entries - job->selected_entries;
            delay = now - job->message->queued_time + 1;
            if (max_needed_entries > 0 && max_needed_entries <= max_slots) {
                score = (double) delay / max_total_entries;
                if (score > best_score) {
                    best_score = score;
                    best_job = job;
                }
            }

            /*
             * Stop early if the best score is as good as it can get.
             * (Only valid for stack_level == 0 jobs, which are ordered by
             * time since queued — see the leaf-children note above.)
             */
            if (delay <= best_score && job->stack_level == 0)
                break;
        }
    }

    /*
     * Cache the result for later use.
     */
    transport->candidate_cache = best_job;
    transport->candidate_cache_current = current;
    transport->candidate_cache_time = now;

    return (best_job);
}