void file_update_block(const char *blockdata, unsigned long long blocknr,
                       unsigned int offsetblock, unsigned long long size,
                       unsigned long long inode, unsigned char *chksum,
                       off_t offset)
{
    DBT *data = NULL;
    DBT *decrypted = NULL;
    DBT *cachedata;
    unsigned char *dbdata;
    INOBNO inobno;
    compr *uncompdata;
    BLKCACHE *blk;
    unsigned char *dtiger;
    INUSE *inuse;
    bool compressed = 1;
    QDTA *dta;
#ifndef SHA3
    word64 res[3];
#endif

    FUNC;
    LDEBUG("file_update_block : inode %llu blocknr %llu offsetblock %llu, size %llu",
           inode, blocknr, (unsigned long long) offsetblock,
           (unsigned long long) size);
    inobno.inode = inode;
    inobno.blocknr = blocknr;

    dbdata = (unsigned char *) s_malloc(BLKSIZE);
    memset(dbdata, 0, BLKSIZE);

    // Fast path: the block is still in the write cache, so patch it there.
    data = try_block_cache(inode, blocknr, 0);
    if (NULL != data) {
        LDEBUG("try_block_cache : HIT");
        memcpy(dbdata, data->data, data->size);
        memcpy(dbdata + offsetblock, blockdata, size);
        add_blk_to_cache(inode, blocknr, offsetblock + size, dbdata, offset);
        update_filesize(inode, size, offsetblock, blocknr, 0, 0, 0);
        free(dbdata);
        DBTfree(data);
        return;
    } else
        LDEBUG("%s: block not found in cache.", __FUNCTION__);

    // We don't need the old blockdata when we overwrite it completely anyway.
    if (offsetblock > 0 || size < BLKSIZE) {
        get_moddb_lock();
        // First read the cache.
        decrypted = search_memhash(dbdtaq, chksum, config->hashlen);
        if (NULL == decrypted) {
            LDEBUG("%s: Not in dbdtaq", __FUNCTION__);
            data = file_tgr_read_data(chksum);
            if (NULL == data) {
                LDEBUG("%s: Not found", __FUNCTION__);
                cachedata = search_memhash(blkcache, &inobno.inode,
                                           sizeof(unsigned long long));
                if (NULL != cachedata) {
                    blk = (BLKCACHE *) cachedata->data;
#ifdef SHA3
                    dtiger = sha_binhash(blk->blockdata, BLKSIZE);
#else
                    binhash(blk->blockdata, BLKSIZE, res);
                    dtiger = (unsigned char *) &res;
#endif
                    if (0 == memcmp(dtiger, chksum, config->hashlen)) {
                        LDEBUG("%s: data alloc here", __FUNCTION__);
                        data = s_malloc(sizeof(DBT));
                        data->data = s_malloc(BLKSIZE);
                        data->size = BLKSIZE;
                        memcpy(data->data, blk->blockdata, BLKSIZE);
                        DBTfree(cachedata);
                        compressed = 0;
                    } else {
                        LDEBUG("updateBlock : Not in dbcache, out of luck.");
                        loghash("updateBlock : No data found to read ", chksum);
                        die_dataerr
                            ("file_update_block : No data found to read - this should never happen: inode :%llu: blocknr :%llu",
                             inode, blocknr);
                    }
#ifdef SHA3
                    free(dtiger);
#endif
                } else {
                    log_fatal_hash("file_update_block : No data found to read ",
                                   chksum);
                    die_dataerr
                        ("file_update_block : No data found to read, this should never happen: inode :%llu: blocknr :%llu",
                         inode, blocknr);
                }
            }
        } else {
            data = s_malloc(sizeof(DBT));
            dta = (QDTA *) decrypted->data;
            data->data = s_malloc(dta->size);
            memcpy(data->data, dta->data, dta->size);
            data->size = dta->size;
            DBTfree(decrypted);
            LDEBUG("data->size = %lu", data->size);
        }
        release_moddb_lock();

        // Blocks that come back smaller than BLKSIZE are stored compressed.
        if (compressed && data->size < BLKSIZE) {
#ifdef LZO
            uncompdata = lzo_decompress(data->data, data->size);
#else
            uncompdata = clz_decompress(data->data, data->size);
#endif
            memcpy(dbdata, uncompdata->data, uncompdata->size);
            comprfree(uncompdata);
        } else {
            memcpy(dbdata, data->data, data->size);
        }
        DBTfree(data);
    }

    // Patch the new data into the block and queue the block for writing.
    memcpy(dbdata + offsetblock, blockdata, size);
    add_blk_to_cache(inode, blocknr, offsetblock + size, dbdata, offset);

    // The old contents of this block are being replaced: drop one reference
    // to the old hash and free its space when this was the last user.
    inuse = file_get_inuse(chksum);
    if (NULL == inuse)
        die_dataerr("file_update_block : hash not found");
    if (inuse->inuse <= 1) {
        file_delete_data_cache(chksum, &inobno);
        put_on_freelist(inuse);
        delete_inuse(chksum);
    } else {
        inuse->inuse--;
        file_update_inuse(chksum, inuse);
    }
    free(inuse);
    update_filesize(inode, size, offsetblock, blocknr, 0, 0, 0);
    free(dbdata);
    EFUNC;
    return;
}
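/*
 * A minimal, self-contained sketch of the read-modify-write rule that
 * file_update_block applies above: the old block contents are only fetched
 * when the incoming write does not cover the whole block. The in-memory
 * "store" array, BLOCK_SIZE and patch_block() are illustrative stand-ins,
 * not part of the real cache/database code.
 */
#include <string.h>

#define BLOCK_SIZE 4096
#define NBLOCKS    16

static unsigned char store[NBLOCKS][BLOCK_SIZE];   /* toy backing store */

static void patch_block(unsigned int blocknr, const unsigned char *data,
                        unsigned int offsetblock, unsigned int size)
{
    unsigned char blk[BLOCK_SIZE] = { 0 };

    /* Only a partial write needs the old data underneath it; a full
       overwrite starts from a zeroed buffer, just like the code above. */
    if (offsetblock > 0 || size < BLOCK_SIZE)
        memcpy(blk, store[blocknr], BLOCK_SIZE);

    memcpy(blk + offsetblock, data, size);
    memcpy(store[blocknr], blk, BLOCK_SIZE);
}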
int mongo_write(const char *path, const char *buf, size_t size,
                off_t offset, struct fuse_file_info *fi)
{
    struct inode *e;
    int res;
    size_t reallen;
    int32_t realend = size, blk_offset = 0;
    const off_t write_end = size + offset;
    char *lock;
    bson doc, cond;
    mongo *conn = get_conn();
    uint8_t hash[20];
    time_t now = time(NULL);

    e = (struct inode *) fi->fh;
    if ((res = get_cached_inode(path, e)) != 0)
        return res;
    if (e->mode & S_IFDIR)
        return -EISDIR;

    /* Uncomment this for incredibly slow length calculations.
    for(;realend >= 0 && buf[realend] == '\0'; realend--);
    realend++;
    for(blk_offset = 0; blk_offset < realend && buf[blk_offset] == 0; blk_offset++);
    blk_offset -= blk_offset > 0 ? 1 : 0;

    * The code below uses SSE2 instructions to find the first/last
    * zero bytes by doing 16-byte comparisons at a time. This should give
    * a ~16x speed boost on blocks with lots of zero bytes over the dumb
    * method above.
    */
    if (size >= 16) {
        __m128i zero = _mm_setzero_si128();

        /* Scan backwards, 16 bytes at a time, for the last nonzero byte. */
        lock = (char *) buf + size - 16;
        while (lock >= buf) {
            __m128i x = _mm_loadu_si128((__m128i *) lock);
            res = _mm_movemask_epi8(_mm_cmpeq_epi8(zero, x));
            if (res == 0xffff) {
                lock -= 16;
                continue;
            }
            realend = lock - buf + fls(res ^ 0xffff);
            break;
        }
        /* The loop ran off the front: every 16-byte chunk it checked was zero. */
        if (lock < buf)
            realend = 0;

        /* Scan forwards for the first nonzero byte before realend. */
        lock = (char *) buf;
        while (lock - buf < realend) {
            __m128i x = _mm_loadu_si128((__m128i *) lock);
            res = _mm_movemask_epi8(_mm_cmpeq_epi8(zero, x));
            if (res == 0xffff) {
                lock += 16;
                continue;
            }
            blk_offset = lock - buf + ffs(res ^ 0xffff) - 1;
            break;
        }
    }
    reallen = realend - blk_offset;

    /* The whole buffer is zero: record a hole instead of storing a block. */
    if (reallen == 0) {
        pthread_mutex_lock(&e->wr_lock);
        res = insert_empty(&e->wr_extent, offset, size);
        goto end;
    }

#ifdef __APPLE__
    CC_SHA1(buf, size, hash);
#else
    SHA1(buf, size, hash);
#endif

    bson_init(&cond);
    bson_append_binary(&cond, "_id", 0, (char *) hash, sizeof(hash));
    bson_finish(&cond);

    bson_init(&doc);
    bson_append_start_object(&doc, "$setOnInsert");
    char *comp_out = get_compress_buf();
    size_t comp_size = snappy_max_compressed_length(reallen);
    if ((res = snappy_compress(buf + blk_offset, reallen,
                               comp_out, &comp_size)) != SNAPPY_OK) {
        fprintf(stderr, "Error compressing input: %d\n", res);
        return -EIO;
    }
    bson_append_binary(&doc, "data", 0, comp_out, comp_size);
    bson_append_int(&doc, "offset", blk_offset);
    bson_append_int(&doc, "size", size);
    bson_append_time_t(&doc, "created", now);
    bson_append_finish_object(&doc);
    bson_finish(&doc);

    /* Upsert keyed on the block's SHA1 so identical blocks are stored once. */
    res = mongo_update(conn, blocks_name, &cond, &doc,
                       MONGO_UPDATE_UPSERT, NULL);
    bson_destroy(&doc);
    bson_destroy(&cond);
    if (res != MONGO_OK) {
        fprintf(stderr, "Error committing block %s\n", conn->lasterrstr);
        return -EIO;
    }

    pthread_mutex_lock(&e->wr_lock);
    res = insert_hash(&e->wr_extent, offset, size, hash);

end:
    if (write_end > e->size)
        e->size = write_end;
    /* Flush the in-memory extent map every few seconds. */
    if (now - e->wr_age > 3) {
        res = serialize_extent(e, e->wr_extent);
        if (res != 0) {
            pthread_mutex_unlock(&e->wr_lock);
            return res;
        }
        e->wr_age = now;
    }
    pthread_mutex_unlock(&e->wr_lock);
    if (res != 0)
        return res;
    res = update_filesize(e, write_end);
    if (res != 0)
        return res;
    return size;
}
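/*
 * A minimal, self-contained sketch of the SSE2 trailing-zero scan used in
 * mongo_write above: compare 16 bytes at a time against zero and use the
 * comparison movemask to locate the last nonzero byte. trailing_data_end()
 * is a hypothetical helper, not part of the original code, and it assumes a
 * GCC/Clang compiler for __builtin_clz.
 */
#include <emmintrin.h>
#include <stddef.h>

static size_t trailing_data_end(const char *buf, size_t size)
{
    size_t end = size;
    const __m128i zero = _mm_setzero_si128();

    /* Walk back in 16-byte steps while the chunk is entirely zero. */
    while (end >= 16) {
        __m128i x = _mm_loadu_si128((const __m128i *) (buf + end - 16));
        int mask = _mm_movemask_epi8(_mm_cmpeq_epi8(x, zero));
        if (mask != 0xffff) {
            /* Some byte differs from zero: (mask ^ 0xffff) has a 1 bit for
               every nonzero byte, so take the highest such bit. */
            int nz = mask ^ 0xffff;
            int last = 31 - __builtin_clz((unsigned) nz);   /* 0..15 */
            return end - 16 + last + 1;
        }
        end -= 16;
    }

    /* Scalar tail for the unaligned head of the buffer. */
    while (end > 0 && buf[end - 1] == '\0')
        end--;
    return end;
}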
void add_file_block(BLKDTA * blkdta)
{
    INOBNO inobno;
    DBT *cachedata = NULL;
    INUSE *inuse;
    OFFHASH *offhash;

    inobno.inode = blkdta->inode;
    inobno.blocknr = blkdta->blocknr;

    FUNC;
    LDEBUG("add_file_block : inode %llu - %llu", inobno.inode, inobno.blocknr);

    // Partial blocks stay in the block cache; only full blocks are hashed
    // and written out.
    if (blkdta->bsize + blkdta->offsetblock < BLKSIZE) {
        // Flush the blockcache before overwriting.
        cachedata = try_block_cache(blkdta->inode, blkdta->blocknr, 0);
        if (cachedata)
            DBTfree(cachedata);
        add_blk_to_cache(blkdta->inode, blkdta->blocknr,
                         blkdta->bsize + blkdta->offsetblock,
                         blkdta->blockfiller, blkdta->offsetfile);
        LDEBUG("add_file_block : wrote with add_blk_to_cache : inode %llu - %llu size %i",
               inobno.inode, inobno.blocknr, blkdta->bsize);
        update_filesize(blkdta->inode, blkdta->bsize, blkdta->offsetblock,
                        blkdta->blocknr, blkdta->sparse, 0, 0);
        return;
    }

    inuse = file_get_inuse(blkdta->stiger);
    if (inuse == NULL) {
        // New content: compress the block and queue the data for writing.
        if (NULL == blkdta->compressed) {
#ifdef LZO
            blkdta->compressed = lzo_compress(blkdta->blockfiller, BLKSIZE);
#else
            blkdta->compressed = clz_compress(blkdta->blockfiller, BLKSIZE);
#endif
        }
        LDEBUG("Compressed %i bytes to %lu bytes", BLKSIZE,
               blkdta->compressed->size);
        loghash("add_file_block call qdta for hash :", blkdta->stiger);
        inuse = s_malloc(sizeof(INUSE));
        inuse->inuse = 0;
        inuse->offset = get_offset(blkdta->compressed->size);
        LDEBUG("add to offset %llu", inuse->offset);
        inuse->size = blkdta->compressed->size;
        file_qdta(&inobno, blkdta->stiger, blkdta->compressed->data,
                  blkdta->compressed->size, inuse->offset);
        loghash("add_file_block queued with qdta", blkdta->stiger);
        update_filesize(blkdta->inode, blkdta->bsize, blkdta->offsetblock,
                        blkdta->blocknr, blkdta->sparse,
                        blkdta->compressed->size, 0);
    } else {
        // Deduplicated: identical data is already stored under this hash.
        update_filesize(blkdta->inode, blkdta->bsize, blkdta->offsetblock,
                        blkdta->blocknr, blkdta->sparse, 0, 1);
    }
    if (NULL != blkdta->compressed)
        comprfree(blkdta->compressed);
    inuse->inuse = inuse->inuse + 1;
    file_update_inuse(blkdta->stiger, inuse);

    offhash = s_malloc(sizeof(OFFHASH));
    offhash->offset = blkdta->offsetfile;
    memcpy(offhash->stiger, blkdta->stiger, config->hashlen);
    write_dbb_to_cache(&inobno, offhash);
    free(offhash);
    free(inuse);
    EFUNC;
    return;
}
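/*
 * A minimal sketch of the dedup decision add_file_block makes above: a block
 * is compressed and stored only when its hash is not yet known, otherwise the
 * write reduces to a reference-count bump. The fixed-size "refs" table,
 * HASH_LEN and store_or_ref() are illustrative stand-ins for the INUSE
 * records and file_qdta()/file_update_inuse(), not the real on-disk format.
 */
#include <string.h>

#define HASH_LEN 24
#define MAX_REFS 1024

struct ref {
    unsigned char hash[HASH_LEN];
    unsigned long inuse;            /* 0 means the slot is free */
};

static struct ref refs[MAX_REFS];

/* Returns 1 when the caller must actually store the block, 0 when it was
   deduplicated against an existing copy, -1 when the toy table is full. */
static int store_or_ref(const unsigned char *hash)
{
    struct ref *free_slot = NULL;

    for (int i = 0; i < MAX_REFS; i++) {
        if (refs[i].inuse == 0) {
            if (!free_slot)
                free_slot = &refs[i];
            continue;
        }
        if (memcmp(refs[i].hash, hash, HASH_LEN) == 0) {
            refs[i].inuse++;        /* known content: just add a reference */
            return 0;
        }
    }
    if (!free_slot)
        return -1;
    /* Unknown content: claim a slot; the caller compresses and writes it. */
    memcpy(free_slot->hash, hash, HASH_LEN);
    free_slot->inuse = 1;
    return 1;
}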