Example #1
0
/* load_bitmap_data
 * @bitmap_table entries must satisfy specification constraints.
 * @bitmap must be cleared */
static int load_bitmap_data(BlockDriverState *bs,
                            const uint64_t *bitmap_table,
                            uint32_t bitmap_table_size,
                            BdrvDirtyBitmap *bitmap)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap);
    uint64_t serialized_size =
        bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size);
    uint64_t tab_size = size_to_clusters(s, serialized_size);
    uint64_t cluster_coverage;  /* bitmap bytes covered by one table entry */
    uint64_t byte_offset;
    uint64_t idx;
    uint8_t *cluster_buf = NULL;
    int ret = 0;

    /* The on-disk table must be exactly the size the bitmap implies. */
    if (tab_size != bitmap_table_size || tab_size > BME_MAX_TABLE_SIZE) {
        return -EINVAL;
    }

    cluster_buf = g_malloc(s->cluster_size);
    cluster_coverage = bytes_covered_by_bitmap_cluster(s, bitmap);

    for (idx = 0, byte_offset = 0; idx < tab_size;
         ++idx, byte_offset += cluster_coverage)
    {
        uint64_t entry = bitmap_table[idx];
        uint64_t data_offset = entry & BME_TABLE_ENTRY_OFFSET_MASK;
        uint64_t count = MIN(bm_size - byte_offset, cluster_coverage);

        assert(check_table_entry(entry, s->cluster_size) == 0);

        if (data_offset != 0) {
            /* Regular cluster: read the serialized bits and merge them. */
            ret = bdrv_pread(bs->file, data_offset, cluster_buf,
                             s->cluster_size);
            if (ret < 0) {
                goto finish;
            }
            bdrv_dirty_bitmap_deserialize_part(bitmap, cluster_buf,
                                               byte_offset, count, false);
        } else if (entry & BME_TABLE_ENTRY_FLAG_ALL_ONES) {
            bdrv_dirty_bitmap_deserialize_ones(bitmap, byte_offset, count,
                                               false);
        } else {
            /* All-zeros cluster: nothing to deserialize, the dirty bitmap
             * is already cleared. */
        }
    }
    ret = 0;

    bdrv_dirty_bitmap_deserialize_finish(bitmap);

finish:
    g_free(cluster_buf);

    return ret;
}
Example #2
0
File: fs.c — Project: mukadr/ghostfs
/*
 * Resize the regular file referenced by @it to @new_size bytes.
 *
 * Returns 0 on success, or a negative errno:
 *   -EINVAL if new_size is negative,
 *   -EFBIG  if new_size exceeds FILESIZE_MAX,
 *   -EISDIR if the entry is a directory,
 *   or whatever cluster_at()/cluster_get()/alloc_clusters() report.
 *
 * NOTE: alloc_clusters() appears to return the index of the first newly
 * allocated cluster on success (it is stored as a cluster link below) and
 * a negative error code on failure — confirm against its definition.
 */
static int do_truncate(struct ghostfs *gfs, struct dir_iter *it, off_t new_size)
{
	int ret;
	int count;              /* clusters kept from the existing chain */
	int next;               /* cluster index following the kept portion */
	struct cluster *c = NULL;  /* last kept cluster, or NULL if none */

	if (new_size < 0)
		return -EINVAL;

	if (new_size > FILESIZE_MAX)
		return -EFBIG;

	if (dir_entry_is_directory(it->entry))
		return -EISDIR;

	next = it->entry->cluster;
	/* Clusters shared by the old and new size — the retained prefix. */
	count = size_to_clusters(min(it->entry->size, new_size));

	if (count) {
		/* Walk to the last retained cluster; remember what follows it. */
		ret = cluster_at(gfs, next, count - 1, &c);
		if (ret < 0)
			return ret;

		next = c->hdr.next;
	}

	if (new_size > it->entry->size) {
		/* Growing: zero the tail of the current last cluster, then
		 * append freshly allocated (zeroed) clusters if needed. */
		int alloc;
		long used = it->entry->size % CLUSTER_DATA;

		// zero remaining cluster space
		/* NOTE(review): relies on used > 0 implying c != NULL — true
		 * provided size_to_clusters(s) > 0 whenever s > 0; confirm. */
		if (used) {
			memset(c->data + used, 0, CLUSTER_DATA - used);
			cluster_set_dirty(c, true);
		}

		alloc = size_to_clusters(new_size) - count;
		if (alloc) {
			/* true => zero-fill the new clusters */
			ret = alloc_clusters(gfs, alloc, NULL, true);
			if (ret < 0)
				return ret;

			/* Link the new chain after the old tail, or make it
			 * the file's first cluster if the file was empty. */
			if (c) {
				c->hdr.next = ret;
				cluster_set_dirty(c, true);
			} else {
				it->entry->cluster = ret;
			}
		}
	} else if (new_size < it->entry->size) {
		/* Shrinking: cut the chain after the last kept cluster and
		 * release everything beyond it. */
		if (next) {
			if (c) {
				c->hdr.next = 0;
				cluster_set_dirty(c, true);
			}
			/* NOTE(review): when count == 0 (truncate to zero),
			 * it->entry->cluster still points at the freed chain
			 * head — verify readers never follow it at size 0. */

			ret = cluster_get(gfs, next, &c);
			if (ret < 0)
				return ret;

			free_clusters(gfs, c);
		}
	}

	/* Record the new size and mark the directory cluster dirty so the
	 * metadata change reaches disk. */
	dir_entry_set_size(it->entry, new_size, false);
	cluster_set_dirty(it->cluster, true);

	return 0;
}