Example #1
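/*
 * Walk a new-style Apple partition map, one HFSPLUS_SECTOR_SIZE entry at a
 * time, looking for an "Apple_HFS" partition (or the entry index requested
 * via sbi->part). On success, *part_start is advanced to the partition's
 * first sector and *part_size is set to its length in sectors.
 */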
static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
		struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	int size = be32_to_cpu(pm->pmMapBlkCnt);
	int buf_size = hfsplus_min_io_size(sb);
	int res;
	int i = 0;

	do {
		if (!memcmp(pm->pmPartType, "Apple_HFS", 9) &&
		    (sbi->part < 0 || sbi->part == i)) {
			*part_start += be32_to_cpu(pm->pmPyPartStart);
			*part_size = be32_to_cpu(pm->pmPartBlkCnt);
			return 0;
		}

		if (++i >= size)
			return -ENOENT;

		pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
		if ((u8 *)pm - (u8 *)buf >= buf_size) {
			res = hfsplus_submit_bio(sb,
						 *part_start + HFS_PMAP_BLK + i,
						 buf, (void **)&pm, REQ_OP_READ,
						 0);
			if (res)
				return res;
		}
	} while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));

	return -ENOENT;
}
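Each iteration above advances to the next map entry, HFSPLUS_SECTOR_SIZE bytes further into the buffer, refilling the buffer from disk once the pointer runs past its end. As a reading aid, here is a hypothetical, abbreviated sketch of the entry layout restricted to the fields the parser touches; the kernel's real struct new_pmap has additional fields and explicit padding, so the offsets below are not authoritative.

struct new_pmap_sketch {		/* hypothetical, abbreviated layout */
	__be16	pmSig;			/* entry signature, compared against HFS_NEW_PMAP_MAGIC */
	/* ... padding and unreferenced fields elided ... */
	__be32	pmMapBlkCnt;		/* total number of entries in the map */
	__be32	pmPyPartStart;		/* first physical block of this partition */
	__be32	pmPartBlkCnt;		/* number of physical blocks in this partition */
	u8	pmPartType[32];		/* partition type string, e.g. "Apple_HFS" */
	/* ... rest of the HFSPLUS_SECTOR_SIZE-sized on-disk entry elided ... */
};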
Example #2
/*
 * Parse the partition map looking for the start and length of an
 * HFS/HFS+ partition.
 */
int hfs_part_find(struct super_block *sb,
		sector_t *part_start, sector_t *part_size)
{
	void *buf, *data;
	int res;

	buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
				 buf, &data, REQ_OP_READ, 0);
	if (res)
		goto out;

	switch (be16_to_cpu(*((__be16 *)data))) {
	case HFS_OLD_PMAP_MAGIC:
		res = hfs_parse_old_pmap(sb, data, part_start, part_size);
		break;
	case HFS_NEW_PMAP_MAGIC:
		res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
		break;
	default:
		res = -ENOENT;
		break;
	}
out:
	kfree(buf);
	return res;
}
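The switch above keys on the big-endian signature word at the start of block HFS_PMAP_BLK. The values below are an assumption based on the classic Apple on-disk format ("TS" for old-style entries, "PM" for Apple Partition Map entries), shown only to make the dispatch readable; they are not copied from the kernel headers.

#define HFS_PMAP_BLK		1	/* assumed: partition map begins at 512-byte block 1 */
#define HFS_OLD_PMAP_MAGIC	0x5453	/* assumed: "TS", old-style partition map entry */
#define HFS_NEW_PMAP_MAGIC	0x504D	/* assumed: "PM", Apple Partition Map entry */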
Example #3
/* Takes the super block; returns 0 if a valid volume header was read, or a negative errno */
int hfsplus_read_wrapper(struct super_block *sb)
{
    struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
    struct hfsplus_wd wd;
    sector_t part_start, part_size;
    u32 blocksize;
    int error = 0;

    error = -EINVAL;
    blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
    if (!blocksize)
        goto out;

    if (hfsplus_get_last_session(sb, &part_start, &part_size))
        goto out;

    error = -ENOMEM;
    sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
    if (!sbi->s_vhdr_buf)
        goto out;
    sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
    if (!sbi->s_backup_vhdr_buf)
        goto out_free_vhdr;

reread:
    error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
                               sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
                               READ);
    if (error)
        goto out_free_backup_vhdr;

    error = -EINVAL;
    switch (sbi->s_vhdr->signature) {
    case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
        set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
    /*FALLTHRU*/
    case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
        break;
    case cpu_to_be16(HFSP_WRAP_MAGIC):
        if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
            goto out_free_backup_vhdr;
        wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
        part_start += (sector_t)wd.ablk_start +
                      (sector_t)wd.embed_start * wd.ablk_size;
        part_size = (sector_t)wd.embed_count * wd.ablk_size;
        goto reread;
    default:
        /*
         * Check for a partition block.
         *
         * (should do this only for cdrom/loop though)
         */
        if (hfs_part_find(sb, &part_start, &part_size))
            goto out_free_backup_vhdr;
        goto reread;
    }

    error = hfsplus_submit_bio(sb, part_start + part_size - 2,
                               sbi->s_backup_vhdr_buf,
                               (void **)&sbi->s_backup_vhdr, READ);
    if (error)
        goto out_free_backup_vhdr;

    error = -EINVAL;
    if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
        pr_warn("invalid secondary volume header\n");
        goto out_free_backup_vhdr;
    }

    blocksize = be32_to_cpu(sbi->s_vhdr->blocksize);

    /*
     * Block size must be at least as large as a sector and a power of 2.
     */
    if (blocksize < HFSPLUS_SECTOR_SIZE || ((blocksize - 1) & blocksize))
        goto out_free_backup_vhdr;
    sbi->alloc_blksz = blocksize;
    sbi->alloc_blksz_shift = 0;
    while ((blocksize >>= 1) != 0)
        sbi->alloc_blksz_shift++;
    blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);

    /*
     * Align block size to block offset.
     */
    while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
        blocksize >>= 1;

    if (sb_set_blocksize(sb, blocksize) != blocksize) {
        pr_err("unable to set blocksize to %u!\n", blocksize);
        goto out_free_backup_vhdr;
    }

    sbi->blockoffset =
        part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
    sbi->part_start = part_start;
    sbi->sect_count = part_size;
    sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
    return 0;

out_free_backup_vhdr:
    kfree(sbi->s_backup_vhdr_buf);
out_free_vhdr:
    kfree(sbi->s_vhdr_buf);
out:
    return error;
}
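Two details above are easy to misread: ((blocksize - 1) & blocksize) is zero exactly when blocksize is a power of two, and the following loop computes log2 of the allocation block size into alloc_blksz_shift. A small userspace illustration of the same arithmetic (not kernel code; plain C for clarity):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blocksize = 4096;	/* example value from a volume header */
	uint32_t shift = 0, b = blocksize;

	/* Same validity test as above: at least one 512-byte sector, power of two. */
	int valid = blocksize >= 512 && ((blocksize - 1) & blocksize) == 0;

	/* Same shift computation as the while loop above: log2(blocksize). */
	while ((b >>= 1) != 0)
		shift++;

	printf("valid=%d alloc_blksz_shift=%u\n", valid, shift);
	/* prints: valid=1 alloc_blksz_shift=12 */
	return 0;
}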
Example #4
int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error, error2;

	if (!wait)
		return 0;

	dprint(DBG_SUPER, "hfsplus_sync_fs\n");

	sb->s_dirt = 0;

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads, we redirty them
	 * during writeout of normal inodes, and thus this livelock
	 * prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb->s_bdev,
				   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				   sbi->s_vhdr, WRITE_SYNC);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	error2 = hfsplus_submit_bio(sb->s_bdev,
				  sbi->part_start + sbi->sect_count - 2,
				  sbi->s_backup_vhdr, WRITE_SYNC);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

	return error;
}
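hfsplus_sync_fs() is normally reached through the VFS rather than called directly. Below is a minimal sketch, assuming the usual super_operations hookup of this kernel era; the name hfsplus_sops_sketch is hypothetical and the real table sets many more operations.

static const struct super_operations hfsplus_sops_sketch = {
	/* ... alloc_inode, put_super, statfs, remount_fs, etc. elided ... */
	.sync_fs	= hfsplus_sync_fs,	/* invoked by sync(2)/umount via the VFS */
};

During sync the VFS may first call ->sync_fs with wait == 0 and then again with wait set, which is why the function above simply returns 0 for the non-blocking pass.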