/** Create a new semaphore.
 * @param name          Optional name for the semaphore, for debugging purposes.
 * @param count         Initial count of the semaphore.
 * @param security      Security attributes for the ACL. If NULL, default
 *                      attributes will be constructed which grant full access
 *                      to the semaphore to the calling process' user.
 * @param rights        Access rights for the handle.
 * @param handlep       Where to store handle to the semaphore.
 * @return              Status code describing result of the operation. */
status_t kern_semaphore_create(const char *name, size_t count,
    const object_security_t *security, object_rights_t rights,
    handle_t *handlep)
{
    /* Kernel-side copy of the security attributes. {-1, -1, NULL} means
     * "no explicit user/group, no ACL yet" — the ACL is filled in below. */
    object_security_t ksecurity = { -1, -1, NULL };
    user_semaphore_t *sem;
    status_t ret;

    if(!handlep)
        return STATUS_INVALID_ARG;

    /* Copy the caller-supplied security attributes in from userspace. */
    if(security) {
        ret = object_security_from_user(&ksecurity, security, true);
        if(ret != STATUS_SUCCESS)
            return ret;
    }

    /* Construct a default ACL if required: grant usage rights to the
     * calling process' user (ACL_ENTRY_USER with -1 meaning "current"
     * — presumably; confirm against object_acl_add_entry's contract). */
    if(!ksecurity.acl) {
        ksecurity.acl = kmalloc(sizeof(*ksecurity.acl), MM_WAIT);
        object_acl_init(ksecurity.acl);
        object_acl_add_entry(ksecurity.acl, ACL_ENTRY_USER, -1,
            SEMAPHORE_RIGHT_USAGE);
    }

    /* MM_WAIT allocations do not fail, so no NULL check here. */
    sem = kmalloc(sizeof(user_semaphore_t), MM_WAIT);
    sem->id = id_allocator_alloc(&semaphore_id_allocator);
    if(sem->id < 0) {
        kfree(sem);
        object_security_destroy(&ksecurity);
        return STATUS_NO_SEMAPHORES;
    }

    /* Copy the optional name from userspace, bounded by SEMAPHORE_NAME_MAX.
     * On failure every resource acquired so far is released, in reverse
     * order of acquisition. */
    if(name) {
        ret = strndup_from_user(name, SEMAPHORE_NAME_MAX, &sem->name);
        if(ret != STATUS_SUCCESS) {
            id_allocator_free(&semaphore_id_allocator, sem->id);
            kfree(sem);
            object_security_destroy(&ksecurity);
            return ret;
        }
    } else {
        sem->name = NULL;
    }

    /* object_init() takes its own copy of the security attributes, so the
     * local copy can be destroyed immediately afterwards. */
    object_init(&sem->obj, &semaphore_object_type, &ksecurity, NULL);
    object_security_destroy(&ksecurity);
    semaphore_init(&sem->sem, (sem->name) ? sem->name : "user_semaphore",
        count);

    /* Initial reference is owned by the handle created below. */
    refcount_set(&sem->count, 1);

    /* Publish the semaphore in the global ID -> semaphore tree. */
    rwlock_write_lock(&semaphore_tree_lock);
    avl_tree_insert(&semaphore_tree, &sem->tree_link, sem->id, sem);
    rwlock_unlock(&semaphore_tree_lock);

    /* Create the handle; on failure drop our reference, which tears the
     * whole semaphore down again (see user_semaphore_release()). */
    ret = object_handle_create(&sem->obj, NULL, rights, NULL, 0, NULL, NULL,
        handlep);
    if(ret != STATUS_SUCCESS)
        user_semaphore_release(sem);
    return ret;
}
/** Drop a reference to a user semaphore, destroying it on the last one.
 * When the reference count reaches zero the semaphore is unlinked from the
 * global lookup tree, its object is destroyed, its ID returned to the
 * allocator, and its memory freed.
 * @param sem           Semaphore to release. */
static void user_semaphore_release(user_semaphore_t *sem) {
    /* Still referenced elsewhere — nothing to do. */
    if(refcount_dec(&sem->count) != 0)
        return;

    /* Last reference gone: remove from the lookup tree first so no new
     * lookups can find the dying semaphore. */
    rwlock_write_lock(&semaphore_tree_lock);
    avl_tree_remove(&semaphore_tree, &sem->tree_link);
    rwlock_unlock(&semaphore_tree_lock);

    object_destroy(&sem->obj);
    id_allocator_free(&semaphore_id_allocator, sem->id);
    kfree(sem);
}
/*
 * Compact the pair table in place, discarding entries whose 'used' flag
 * has been cleared (the "fly holes" marked by block_alloc_off). After
 * this call only live pairs remain and the freed ranges become reusable.
 *
 * Takes the block's write lock itself; callers must NOT already hold it.
 */
void block_shrink(struct block *b) {
    uint32_t src;
    uint32_t dst = 0;

    rwlock_write_lock(&b->rwlock, &b->mtx);

    /* Classic stable in-place filter: copy each live entry down over
     * the gap left by dead ones. */
    for (src = 0; src < b->pairs_used; src++) {
        if (!b->pairs[src].used)
            continue;
        b->pairs[dst] = b->pairs[src];
        dst++;
    }
    b->pairs_used = dst;

    rwlock_write_unlock(&b->rwlock);
}
/*
 * Populate a block with an initial, pre-existing set of pairs.
 *
 * The incoming pairs are sorted by _pair_compare_fun (presumably by file
 * offset — confirm against the comparator) and copied into the block's
 * table one at a time, growing the table as needed. Finally the block's
 * allocation high-water mark is advanced past the last (highest) pair.
 *
 * Takes the block's write lock itself; requires n > 0.
 */
void block_init(struct block *b, struct block_pair *pairs, uint32_t n) {
    uint32_t idx;

    nassert(n > 0);

    rwlock_write_lock(&b->rwlock, &b->mtx);

    qsort(pairs, n, sizeof(*pairs), _pair_compare_fun);

    for (idx = 0; idx < n; idx++) {
        _extend(b, 1);
        memcpy(&b->pairs[idx], &pairs[idx], sizeof(*pairs));
        b->pairs_used++;
    }

    /* Advance the high-water mark to the end of the last pair on disk
     * (offset of the final entry plus its aligned size). */
    b->allocated += pairs[n - 1].offset;
    b->allocated += ALIGN(pairs[n - 1].real_size);

    rwlock_write_unlock(&b->rwlock);
}
/*
 * Allocate an on-disk offset for node 'nid'.
 *
 * Any existing pair for the same nid is marked unused (a "fly hole") —
 * it stays in the table until block_shrink() compacts it away. The
 * allocator then first tries to reuse a gap between two adjacent live
 * pairs that is large enough for the aligned request; failing that it
 * appends at the current end of the file and bumps the high-water mark.
 *
 * Takes the block's write lock itself. Returns the chosen disk offset.
 */
DISKOFF block_alloc_off(struct block *b, uint64_t nid, uint32_t real_size,
    uint32_t skeleton_size, uint32_t height) {
    DISKOFF r;
    uint32_t i;
    int found = 0;
    uint32_t pos = 0;

    rwlock_write_lock(&b->rwlock, &b->mtx);

    /* Default: append at the (aligned) end of the currently allocated
     * space. May be overridden below if a reusable gap is found. */
    r = ALIGN(b->allocated);

    /* Make room for one more pair entry. */
    _extend(b, 1);

    /*
     * set old hole to fly:
     * a previous pair for this nid is marked unused, but it is not
     * removed (visible) until block_shrink() runs.
     */
    for (i = 0; i < b->pairs_used; i++) {
        if (b->pairs[i].nid == nid) {
            b->pairs[i].used = 0;
            break;
        }
    }

    /* Scan adjacent pairs for a gap big enough to hold the new entry.
     * A gap exists when the aligned end of pairs[pos] plus the aligned
     * request still fits before pairs[pos + 1] begins. */
    if (b->pairs_used > 0) {
        for (pos = 0; pos < (b->pairs_used - 1); pos++) {
            DISKOFF off_aligned;
            struct block_pair *p;
            struct block_pair *nxtp;

            p = &b->pairs[pos];
            nxtp = &b->pairs[pos + 1];
            off_aligned = (ALIGN(p->offset) + ALIGN(p->real_size));
            if ((off_aligned + ALIGN(real_size)) <= nxtp->offset) {
                r = off_aligned;
                /* Shift everything from pos+1 up one slot to open the
                 * insertion point; regions overlap, hence memmove. */
                memmove(&b->pairs[pos + 1 + 1], &b->pairs[pos + 1],
                    sizeof(*b->pairs) * (b->pairs_used - pos - 1));
                found = 1;
                break;
            }
        }
    }

    /* found the reuse hole: insert just after pairs[pos]; otherwise
     * append at the end and grow the file's high-water mark. */
    if (found) {
        pos += 1;
    } else {
        pos = b->pairs_used;
        b->allocated = (ALIGN(b->allocated) + ALIGN(real_size));
    }

    /* Fill in the new pair entry at the chosen slot. */
    b->pairs[pos].offset = r;
    b->pairs[pos].height = height;
    b->pairs[pos].real_size = real_size;
    b->pairs[pos].skeleton_size = skeleton_size;
    b->pairs[pos].nid = nid;
    b->pairs[pos].used = 1;
    b->pairs_used++;

    rwlock_write_unlock(&b->rwlock);

    return r;
}
/*
 * Micro-benchmark: uncontended lock/unlock latency of a Concurrency Kit
 * bytelock versus a naive rwlock, for both write and read acquisitions.
 * Each section warms the cache with one untimed pass, then times a
 * second pass with rdtsc and reports average cycles per acquire/release.
 */
int main(void) {
    ck_bytelock_t bytelock = CK_BYTELOCK_INITIALIZER;
    rwlock_t naive;
    uint64_t start, end, iter;

    /* --- bytelock, write path --- */
    for (iter = 0; iter < STEPS; iter++) {
        ck_bytelock_write_lock(&bytelock, 1);
        ck_bytelock_write_unlock(&bytelock);
    }
    start = rdtsc();
    for (iter = 0; iter < STEPS; iter++) {
        ck_bytelock_write_lock(&bytelock, 1);
        ck_bytelock_write_unlock(&bytelock);
    }
    end = rdtsc();
    printf("WRITE: bytelock %15" PRIu64 "\n", (end - start) / STEPS);

    /* --- naive rwlock, write path --- */
    rwlock_init(&naive);
    for (iter = 0; iter < STEPS; iter++) {
        rwlock_write_lock(&naive);
        rwlock_write_unlock(&naive);
    }
    start = rdtsc();
    for (iter = 0; iter < STEPS; iter++) {
        rwlock_write_lock(&naive);
        rwlock_write_unlock(&naive);
    }
    end = rdtsc();
    printf("WRITE: naive %15" PRIu64 "\n", (end - start) / STEPS);

    /* --- bytelock, read path --- */
    for (iter = 0; iter < STEPS; iter++) {
        ck_bytelock_read_lock(&bytelock, 1);
        ck_bytelock_read_unlock(&bytelock, 1);
    }
    start = rdtsc();
    for (iter = 0; iter < STEPS; iter++) {
        ck_bytelock_read_lock(&bytelock, 1);
        ck_bytelock_read_unlock(&bytelock, 1);
    }
    end = rdtsc();
    printf("READ:  bytelock %15" PRIu64 "\n", (end - start) / STEPS);

    /* --- naive rwlock, read path --- */
    for (iter = 0; iter < STEPS; iter++) {
        rwlock_read_lock(&naive);
        rwlock_read_unlock(&naive);
    }
    start = rdtsc();
    for (iter = 0; iter < STEPS; iter++) {
        rwlock_read_lock(&naive);
        rwlock_read_unlock(&naive);
    }
    end = rdtsc();
    printf("READ:  naive %15" PRIu64 "\n", (end - start) / STEPS);

    return 0;
}