/*
 * REQUIRES:
 *	block write lock held by the caller's context (taken here)
 *
 * EFFECTS:
 *	compacts the pair array in place, dropping every entry whose
 *	'used' flag is clear — this is what makes "fly" holes visible
 *	(i.e. actually reclaimed) after block_alloc_off() retired them
 */
void block_shrink(struct block *b)
{
	uint32_t src, dst;

	rwlock_write_lock(&b->rwlock, &b->mtx);
	dst = 0;
	for (src = 0; src < b->pairs_used; src++) {
		if (!b->pairs[src].used)
			continue;
		b->pairs[dst] = b->pairs[src];
		dst++;
	}
	b->pairs_used = dst;
	rwlock_write_unlock(&b->rwlock);
}
/*
 * Populate an empty block with an initial set of pairs.
 *
 * Sorts the incoming array with the project comparator (the hole-reuse
 * scan in block_alloc_off relies on pairs being ordered), copies each
 * pair in after growing capacity, then advances 'allocated' past the
 * end of the last (highest-offset) pair.
 *
 * NOTE(review): 'allocated' is advanced with '+=', i.e. relative to
 * whatever base value it already holds — presumably a file-header
 * offset; confirm against the struct's initialization.
 */
void block_init(struct block *b, struct block_pair *pairs, uint32_t n)
{
	uint32_t idx;

	nassert(n > 0);
	rwlock_write_lock(&b->rwlock, &b->mtx);

	qsort(pairs, n, sizeof(*pairs), _pair_compare_fun);
	for (idx = 0; idx < n; idx++) {
		_extend(b, 1);
		memcpy(&b->pairs[idx], &pairs[idx], sizeof(*pairs));
		b->pairs_used++;
	}

	/* get the last allocated position of file */
	b->allocated += pairs[n - 1].offset + ALIGN(pairs[n - 1].real_size);

	rwlock_write_unlock(&b->rwlock);
}
/*
 * Allocate an on-disk offset for node 'nid' with payload 'real_size'.
 *
 * Any existing pair for the same nid is marked unused ("fly"); it stays
 * in the array and is only physically removed by block_shrink().  The
 * function then tries to reuse a gap between two adjacent live pairs;
 * failing that, it appends at the aligned end of the allocated region.
 *
 * Returns the (aligned) disk offset chosen for the new pair.
 *
 * NOTE(review): the gap scan assumes b->pairs is sorted by offset —
 * presumably guaranteed by block_init()'s qsort and the sorted insert
 * below; confirm no other writer breaks that ordering.
 */
DISKOFF block_alloc_off(struct block *b,
                        uint64_t nid,
                        uint32_t real_size,
                        uint32_t skeleton_size,
                        uint32_t height)
{
	DISKOFF r;
	uint32_t i;
	int found = 0;
	uint32_t pos = 0;

	rwlock_write_lock(&b->rwlock, &b->mtx);
	/* default placement: aligned end of the currently allocated region */
	r = ALIGN(b->allocated);
	_extend(b, 1);

	/*
	 * set old hole to fly
	 * it is not visible until you call 'block_shrink'
	 */
	for (i = 0; i < b->pairs_used; i++) {
		if (b->pairs[i].nid == nid) {
			b->pairs[i].used = 0;
			break;
		}
	}

	/* find the not-fly hole to reuse */
	if (b->pairs_used > 0) {
		for (pos = 0; pos < (b->pairs_used - 1); pos++) {
			DISKOFF off_aligned;
			struct block_pair *p;
			struct block_pair *nxtp;

			p = &b->pairs[pos];
			nxtp = &b->pairs[pos + 1];
			/* first aligned byte past pair 'pos' */
			off_aligned = (ALIGN(p->offset) + ALIGN(p->real_size));
			if ((off_aligned + ALIGN(real_size)) <= nxtp->offset) {
				/* gap fits: shift the tail right one slot so
				 * the new pair can be inserted at pos + 1,
				 * keeping the array ordered by offset */
				r = off_aligned;
				memmove(&b->pairs[pos + 1 + 1],
					&b->pairs[pos + 1],
					sizeof(*b->pairs) * (b->pairs_used - pos - 1));
				found = 1;
				break;
			}
		}
	}

	/* found the reuse hole */
	if (found) {
		pos += 1;
	} else {
		/* no reusable gap: append and advance the high-water mark */
		pos = b->pairs_used;
		b->allocated = (ALIGN(b->allocated) + ALIGN(real_size));
	}

	b->pairs[pos].offset = r;
	b->pairs[pos].height = height;
	b->pairs[pos].real_size = real_size;
	b->pairs[pos].skeleton_size = skeleton_size;
	b->pairs[pos].nid = nid;
	b->pairs[pos].used = 1;
	b->pairs_used++;

	rwlock_write_unlock(&b->rwlock);
	return r;
}
/*
 * Micro-benchmark: average cycles per uncontended lock/unlock pair for a
 * ck_bytelock versus the naive rwlock, for both write and read paths.
 * Each measurement runs one untimed warmup pass of STEPS iterations,
 * then times STEPS iterations with rdtsc and prints the per-iteration
 * average.
 */
int main(void)
{
	uint64_t start, end, iter;
	ck_bytelock_t bl = CK_BYTELOCK_INITIALIZER;
	rwlock_t rw;

	/* bytelock write path: warmup, then timed run */
	for (iter = 0; iter < STEPS; iter++) {
		ck_bytelock_write_lock(&bl, 1);
		ck_bytelock_write_unlock(&bl);
	}
	start = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_bytelock_write_lock(&bl, 1);
		ck_bytelock_write_unlock(&bl);
	}
	end = rdtsc();
	printf("WRITE: bytelock %15" PRIu64 "\n", (end - start) / STEPS);

	/* naive rwlock write path: warmup, then timed run */
	rwlock_init(&rw);
	for (iter = 0; iter < STEPS; iter++) {
		rwlock_write_lock(&rw);
		rwlock_write_unlock(&rw);
	}
	start = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		rwlock_write_lock(&rw);
		rwlock_write_unlock(&rw);
	}
	end = rdtsc();
	printf("WRITE: naive %15" PRIu64 "\n", (end - start) / STEPS);

	/* bytelock read path: warmup, then timed run */
	for (iter = 0; iter < STEPS; iter++) {
		ck_bytelock_read_lock(&bl, 1);
		ck_bytelock_read_unlock(&bl, 1);
	}
	start = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_bytelock_read_lock(&bl, 1);
		ck_bytelock_read_unlock(&bl, 1);
	}
	end = rdtsc();
	printf("READ: bytelock %15" PRIu64 "\n", (end - start) / STEPS);

	/* naive rwlock read path: warmup, then timed run */
	for (iter = 0; iter < STEPS; iter++) {
		rwlock_read_lock(&rw);
		rwlock_read_unlock(&rw);
	}
	start = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		rwlock_read_lock(&rw);
		rwlock_read_unlock(&rw);
	}
	end = rdtsc();
	printf("READ: naive %15" PRIu64 "\n", (end - start) / STEPS);

	return (0);
}