/*
 * Read the chunk identified by @digest into @chunk (CHUNK_SIZE bytes).
 * Takes a shared flock() on the database file for the duration of the
 * lookup.  Returns true on success, false if the chunk is absent or the
 * lookup failed.
 */
static bool file_read_chunk(unsigned char *chunk, const unsigned char *digest, void *db_info)
{
	struct db *db = db_info;
	unsigned char *found;
	bool ok = false;

	flock(db->fd, LOCK_SH);

	found = lookup_chunk(db, digest);
	if (!found)
		goto unlock;
	if (IS_ERR(found)) {
		/* Error-encoded pointer: nothing was mapped, just report it. */
		TRACE("digest=%s: %s\n", digest_string(digest), strerror(PTR_ERR(found)));
		goto unlock;
	}

	memcpy(chunk, found, CHUNK_SIZE);
	unmap_chunk(found);
	ok = true;

unlock:
	flock(db->fd, LOCK_UN);
	return ok;
}
static bool file_write_chunk(const unsigned char *chunk, const unsigned char *digest, void *db_info) { struct db *db = db_info; unsigned char *db_chunk; bool status = false; int error; flock(db->fd, LOCK_EX); db_chunk = lookup_chunk(db, digest); if (db_chunk) { if (IS_ERR(db_chunk)) { TRACE("lookup_chunk(%s): %s\n", digest_string(digest), strerror(PTR_ERR(db_chunk))); } else status = true; goto out; } /* * When adding a new chunk, the file needs to be resized, otherwise * any access to the chunk will cause a SIGBUS. */ if (ftruncate(db->fd, ((off_t)db->next_nr + 1) * CHUNK_SIZE)) { TRACE("ftruncate(%u * CHUNK_SIZE): %s\n", db->next_nr + 1, strerror(errno)); goto out; } db_chunk = __map_chunk(db, db->next_nr, 0); if (IS_ERR(db_chunk)) { TRACE("__map_chunk(%u): %s\n", db->next_nr, strerror(PTR_ERR(db_chunk))); goto out; } memcpy(db_chunk, chunk, CHUNK_SIZE); error = hash_insert(db, *(uint32_t *)digest, db->next_nr); if (error) { TRACE("hash_insert(0x%x, %u): %s\n", *(uint32_t *)digest, db->next_nr, strerror(-error)); goto out; } status = true; db->next_nr ++; out: unmap_chunk(db_chunk); flock(db->fd, LOCK_UN); return status; }
/*
 * Detach @chunk from its cache's chunk list and dispose of it.
 *
 * Disposal policy: if this was the last chunk, any reserve is dropped
 * first and the chunk itself is unmapped.  Otherwise the chunk is kept
 * as the cache's reserve if that slot is free, else it is unmapped.
 */
static inline void release_chunk(struct mc_chunk *chunk)
{
	struct mcache *cache = chunk->cache;

	TAILQ_REMOVE(&cache->chunks, chunk, node);

	if (--cache->chunk_num == 0) {
		/* Cache emptied: nothing left to reserve for. */
		if (cache->reserve)
			release_reserved(cache);
		unmap_chunk(chunk);
		return;
	}

	if (cache->reserve) {
		/* Reserve slot already taken — just unmap. */
		unmap_chunk(chunk);
		return;
	}

	cache->reserve = chunk;
	DBGTRACE("%s(%d): reserve cache:%p chunk:%p\n", __func__, __LINE__, cache, chunk);
}
/*
 * Find the chunk whose content digest is @digest in the two-level
 * on-file index and return a mapping of it.
 *
 * Index layout (as used here): @db->root is an array of struct index;
 * root[0].hash appears to hold the number of leaves (big-endian) —
 * entries root[1..] carry the smallest hash of each subsequent leaf,
 * so root[leaf_nr - 1] is the leaf that may contain @hash.  Each leaf
 * holds up to MAX_INDEX (hash, chunk_nr) pairs sorted by hash, padded
 * with INVALID_CHUNK_NR sentinels.
 *
 * Only the first 32 bits of the digest are indexed; hash collisions are
 * resolved by verify_chunk() against the full digest.
 *
 * Returns a mapped chunk (caller must unmap_chunk()), NULL if not
 * found, or an IS_ERR()-encoded pointer on mapping failure.
 */
unsigned char *lookup_chunk(struct db *db, const unsigned char *digest)
{
	struct index *root = db->root;
	struct index *leaf;
	uint32_t hash = *(uint32_t *)digest;
	int i, leaf_nr;
	unsigned char *chunk;

	/* Pick the last leaf whose first hash is <= @hash. */
	/* XXX: this may need to become a binary search */
	for (leaf_nr = 1; leaf_nr < be32toh(root[0].hash); leaf_nr ++)
		if (hash < be32toh(root[leaf_nr].hash))
			break;

	TRACE("leaf_nr=%d chunk_nr=%u hash=%x\n", leaf_nr,
	      be32toh(root[leaf_nr-1].chunk_nr), be32toh(root[leaf_nr-1].hash));

	leaf = map_chunk(db, be32toh(root[leaf_nr - 1].chunk_nr));
	if (IS_ERR(leaf))
		return (void *)leaf;

	/* Scan the sorted leaf; duplicates of @hash sit adjacently. */
	for (i = 0; i < MAX_INDEX; i ++) {
		if (be32toh(leaf[i].chunk_nr) == INVALID_CHUNK_NR)
			break;	/* sentinel: end of used entries */
		if (hash < be32toh(leaf[i].hash))
			break;	/* past the insertion point — not present */
		if (hash == be32toh(leaf[i].hash)) {
			chunk = map_chunk(db, be32toh(leaf[i].chunk_nr));
			if (IS_ERR(chunk))
				goto out;	/* propagate the error pointer */
			if (verify_chunk(chunk, digest))
				goto out;	/* full-digest match confirmed */
			/* 32-bit hash collision: keep scanning duplicates. */
			unmap_chunk(chunk);
		}
	}
	chunk = NULL;
out:
	unmap_chunk(leaf);
	return chunk;
}
/*
 * Drop the cache's reserve chunk: clear the slot and unmap the chunk.
 * Caller must ensure @cache->reserve is non-NULL.
 */
static inline void release_reserved(struct mcache *cache)
{
	struct mc_chunk *reserved = cache->reserve;

	cache->reserve = NULL;
	unmap_chunk(reserved);
}
/*
 * Insert the mapping @hash -> @chunk_nr into the two-level on-file
 * index, splitting the target leaf when it is full.
 *
 * root[0].hash appears to hold the current number of leaves
 * (big-endian); root[1..] carry each subsequent leaf's smallest hash,
 * so root[leaf_nr - 1] is the leaf that should receive @hash.  Leaves
 * hold up to MAX_INDEX sorted (hash, chunk_nr) entries padded with
 * INVALID_CHUNK_NR sentinels.  All on-file fields are big-endian.
 *
 * Returns 0 on success, -ENOSPC when the root or a leaf cannot grow,
 * or a negative errno from a failed chunk mapping.
 */
static int hash_insert(struct db *db, uint32_t hash, uint32_t chunk_nr)
{
	struct index *root = db->root;
	struct index *leaf;
	struct index *split;
	int i, split_at, leaf_nr;

	/* Pick the last leaf whose first hash is <= @hash. */
	/* XXX: this may need to become a binary search */
	for (leaf_nr = 1; leaf_nr < be32toh(root[0].hash); leaf_nr ++)
		if (hash < be32toh(root[leaf_nr].hash))
			break;

	/*
	 * Root full: refuse even inserts that would not split, keeping the
	 * check simple (conservative — checked before we know a split is
	 * actually needed).
	 */
	if (be32toh(root[0].hash) == MAX_INDEX)
		return -ENOSPC;

	leaf = map_chunk(db, be32toh(root[leaf_nr - 1].chunk_nr));
	if (IS_ERR(leaf))
		return -PTR_ERR(leaf);

	/* Find the insertion slot, keeping the leaf sorted by hash. */
	/* XXX: this may need to become a binary search */
	for (i = 0; i < MAX_INDEX; i ++) {
		if (be32toh(leaf[i].chunk_nr) == INVALID_CHUNK_NR)
			break;	/* first free slot */
		if (hash < be32toh(leaf[i].hash))
			break;	/* insertion point inside used entries */
	}
	if (be32toh(leaf[MAX_INDEX-1].chunk_nr) != INVALID_CHUNK_NR)
		goto split_leaf;	/* leaf full: make room first */

do_insert:
	/*
	 * Shift the tail one slot right and write the new entry.  The last
	 * slot dropped by the memmove is guaranteed free: a full leaf was
	 * split above.
	 */
	memmove(leaf + i + 1, leaf + i, sizeof(*leaf) * (MAX_INDEX - i - 1));
	leaf[i].hash = htobe32(hash);
	leaf[i].chunk_nr = htobe32(chunk_nr);
	unmap_chunk(leaf);
	return 0;

split_leaf:
	/*
	 * Be smart where to split.  If a hash repeats, make sure that all
	 * of it stays in the same leaf: search outward from SPLIT_AT for a
	 * boundary between two distinct hashes, first to the right, then to
	 * the left.
	 */
	for (split_at = SPLIT_AT; split_at != MAX_INDEX; split_at ++)
		if (leaf[split_at].hash.v != leaf[split_at-1].hash.v)
			goto split_here;
	for (split_at = SPLIT_AT-1; split_at > 0; split_at --)
		if (leaf[split_at].hash.v != leaf[split_at-1].hash.v)
			goto split_here;
	/* The whole leaf is one repeated hash — cannot split it. */
	unmap_chunk(leaf);
	return -ENOSPC;

split_here:
	/*
	 * The new leaf occupies the chunk numbered with the current leaf
	 * count — presumably leaf chunks are allocated densely from 0;
	 * TODO confirm against the db creation code.
	 */
	split = map_chunk(db, be32toh(root[0].hash));
	if (IS_ERR(split)) {
		unmap_chunk(leaf);
		return -PTR_ERR(split);
	}
	/* Move the upper half into the new leaf, clear it in the old one. */
	memcpy(split, leaf + split_at, sizeof(*leaf) * (MAX_INDEX - split_at));
	memset(leaf + split_at, 0, sizeof(*leaf) * (MAX_INDEX - split_at));
	/* Open a root slot after leaf_nr and register the new leaf. */
	memmove(root + leaf_nr + 1, root + leaf_nr, sizeof(*root) * (be32toh(root[0].hash) - leaf_nr));
	root[leaf_nr].hash = split[0].hash;
	/* root[0].hash is already big-endian — reuse it as the chunk_nr. */
	root[leaf_nr].chunk_nr = root[0].hash;
	root[0].hash = htobe32(be32toh(root[0].hash) + 1);
	/* Redirect the pending insert to whichever half now owns slot i. */
	if (i > split_at) {
		unmap_chunk(leaf);
		leaf = split;
		i -= split_at;
	} else
		unmap_chunk(split);
	goto do_insert;
}