Example #1
void parse_susp_rock_ridge_plcl(struct rrii_dir_record *dir, u32_t block) {
	struct inode *rep_inode;
	struct buf *bp;
	struct iso9660_dir_record *dir_rec;
	struct dir_extent extent;
	struct inode_dir_entry dummy_dir_entry;
	size_t dummy_offset = 0;

	/* Check whether the inode has already been parsed. */
	rep_inode = inode_cache_get(block);
	if (rep_inode != NULL) {
		rep_inode->i_refcount++;
		dir->reparented_inode = rep_inode;
		return;
	}

	/* Peek ahead to build extent for read_inode. */
	if (lmfs_get_block(&bp, fs_dev, block, NORMAL) != OK)
		return;

	dir_rec = (struct iso9660_dir_record*)b_data(bp);

	extent.location = block;
	extent.length = dir_rec->data_length_l / v_pri.logical_block_size_l;
	if (dir_rec->data_length_l % v_pri.logical_block_size_l)
		extent.length++;
	extent.next = NULL;
	lmfs_put_block(bp);

	memset(&dummy_dir_entry, 0, sizeof(struct inode_dir_entry));
	read_inode(&dummy_dir_entry, &extent, &dummy_offset);
	free(dummy_dir_entry.r_name);
	dir->reparented_inode = dummy_dir_entry.i_node;
}
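A minimal sketch of the rounding step above, which converts the extent's byte length into a whole number of logical blocks; the helper name is hypothetical:

/* Ceiling division: equivalent to the divide-then-adjust sequence above. */
static u32_t bytes_to_blocks(u32_t bytes, u32_t block_size)
{
	return (bytes + block_size - 1) / block_size;
}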
Example #2
struct buf* read_extent_block(struct dir_extent *e, size_t block)
{
	size_t block_id = get_extent_absolute_block_id(e, block);

	if (block_id == 0 || block_id >= v_pri.volume_space_size_l)
		return NULL;

	return lmfs_get_block(fs_dev, block_id, NORMAL);
}
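A hedged usage sketch for read_extent_block(), relying only on helpers that appear elsewhere on this page (b_data(), lmfs_put_block(), v_pri). Note that NULL covers both a zero block id and an out-of-range block, so the caller cannot tell the two apart:

/* Hypothetical caller: copy one block of an extent, if present. */
static int copy_extent_block(struct dir_extent *e, size_t block, char *out)
{
	struct buf *bp = read_extent_block(e, block);

	if (bp == NULL)
		return EIO;	/* hole or out-of-range block */

	memcpy(out, b_data(bp), v_pri.logical_block_size_l);
	lmfs_put_block(bp, FULL_DATA_BLOCK);

	return OK;
}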
Example #3
int
readblock(int b, int blocksize, u32_t seed, char *data)
{
	struct buf *bp;

	assert(blocksize == curblocksize);

	if(!(bp = lmfs_get_block(MYDEV, b, NORMAL))) {
		e(30);
		return 0;
	}

	memcpy(data, bp->data, blocksize);

	lmfs_put_block(bp, FULL_DATA_BLOCK);

	return blocksize;
}
Example #4
int
readblock(int b, int blocksize, u32_t seed, char *data)
{
	struct buf *bp;
	int r;

	assert(blocksize == curblocksize);

	if ((r = lmfs_get_block(&bp, MYDEV, b, NORMAL)) != 0) {
		e(30);
		return 0;
	}

	memcpy(data, bp->data, blocksize);

	lmfs_put_block(bp);

	return blocksize;
}
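The two readblock() versions above show the lmfs_get_block() interface change: the older call returns the buffer pointer (NULL on failure) and lmfs_put_block() takes a block type, while the newer call returns a status code and hands the buffer back through a pointer argument. A minimal compatibility sketch, assuming only what the two examples show; the shim is hypothetical, not part of libminixfs:

/* Hypothetical shim: present the newer interface to older callers. */
static struct buf *get_block_compat(dev_t dev, block64_t block, int how)
{
	struct buf *bp;

	if (lmfs_get_block(&bp, dev, block, how) != OK)
		return NULL;	/* older callers expect NULL on failure */

	return bp;
}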
Example #5
/*===========================================================================*
 *				lmfs_readahead				     *
 *===========================================================================*/
void lmfs_readahead(dev_t dev, block64_t base_block, unsigned int nblocks,
	size_t last_size)
{
/* Read ahead 'nblocks' blocks starting from the block 'base_block' on device
 * 'dev'. The number of blocks must be between 1 and LMFS_MAX_PREFETCH,
 * inclusive. All blocks have the file system's block size, except possibly the
 * last block in the range, which is of size 'last_size'. The caller must
 * ensure that none of the blocks in the range are already in the cache.
 * However, the caller must also not rely on all or even any of the blocks to
 * be present in the cache afterwards--failures are (deliberately!) ignored.
 */
  static noxfer_buf_ptr_t bufq[LMFS_MAX_PREFETCH]; /* static for size only */
  struct buf *bp;
  unsigned int count;
  int r;

  assert(nblocks >= 1 && nblocks <= LMFS_MAX_PREFETCH);

  for (count = 0; count < nblocks; count++) {
	if (count == nblocks - 1)
		r = lmfs_get_partial_block(&bp, dev, base_block + count,
		    NO_READ, last_size);
	else
		r = lmfs_get_block(&bp, dev, base_block + count, NO_READ);

	if (r != OK)
		break;

	/* We could add a flag that makes the get_block() calls fail if the
	 * block is already in the cache, but it is not a major concern if it
	 * is: we just perform a useless read in that case. However, if the
	 * block is cached *and* dirty, we are about to lose its new contents.
	 */
	assert(lmfs_isclean(bp));

	bufq[count] = bp;
  }

  rw_scattered(dev, bufq, count, READING);
}
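A hedged sketch of a lmfs_readahead() caller that satisfies the contract stated above: at least one block, at most LMFS_MAX_PREFETCH, and a last_size for a possibly partial final block. The device-size bookkeeping is an assumption for illustration, not part of the API:

/* Hypothetical caller of lmfs_readahead(); names are illustrative. */
static void prefetch_range(dev_t dev, block64_t base, unsigned int nblocks,
	size_t block_size, u64_t dev_size)
{
	u64_t end;
	size_t last_size;

	if (nblocks == 0)
		return;			/* the API requires at least one block */
	if (nblocks > LMFS_MAX_PREFETCH)
		nblocks = LMFS_MAX_PREFETCH;

	/* Shrink the final block if the range ends past the device end;
	 * assumes the range overshoots by less than one full block.
	 */
	end = (base + nblocks) * block_size;
	last_size = block_size;
	if (end > dev_size)
		last_size = (size_t)(block_size - (end - dev_size));

	lmfs_readahead(dev, base, nblocks, last_size);
}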
Example #6
File: bio.c Project: Hooman3/minix
/*
 * Prefetch up to "nblocks" blocks on "dev" starting from block number "block".
 * Stop early when either the I/O request fills up or when a block is already
 * found to be in the cache.  The latter is likely to happen often, since this
 * function is called before getting each block for reading.  Prefetching is a
 * strictly best-effort operation, and may fail silently.
 * TODO: limit according to the number of available buffers.
 */
static void
block_prefetch(dev_t dev, block_t block, block_t nblocks)
{
	struct buf *bp, *bufs[NR_IOREQS];
	unsigned int count;

	/* Clamp to the bufs[] capacity so the loop cannot overrun it. */
	if (nblocks > NR_IOREQS)
		nblocks = NR_IOREQS;

	for (count = 0; count < nblocks; count++) {
		bp = lmfs_get_block(dev, block + count, PREFETCH);
		assert(bp != NULL);

		if (lmfs_dev(bp) != NO_DEV) {
			lmfs_put_block(bp, FULL_DATA_BLOCK);

			break;
		}

		bufs[count] = bp;
	}

	if (count > 0)
		lmfs_rw_scattered(dev, bufs, count, READING);
}
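A hedged sketch of the intended calling pattern, under the older interface used in this example: prefetch a run of blocks in one scattered request, then fetch them individually from the cache, as lmfs_bio() does below. The function name is hypothetical:

/* Hypothetical caller: prefetch, then consume blocks sequentially. */
static void read_sequential(dev_t dev, block_t base, block_t nblocks)
{
	struct buf *bp;
	block_t i;

	block_prefetch(dev, base, nblocks);

	for (i = 0; i < nblocks; i++) {
		bp = lmfs_get_block(dev, base + i, NORMAL);
		assert(bp != NULL);
		/* ... consume bp->data ... */
		lmfs_put_block(bp, FULL_DATA_BLOCK);
	}
}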
Example #7
File: bio.c Project: Hooman3/minix
/*
 * Perform block I/O, on "dev", starting from offset "pos", for a total of
 * "bytes" bytes.  Reading, writing, and peeking are highly similar, and thus,
 * this function implements all of them.  The "call" parameter indicates the
 * call type (one of FSC_READ, FSC_WRITE, FSC_PEEK).  For read and write calls,
 * "data" will identify the user buffer to use; for peek calls, "data" is set
 * to NULL.  In all cases, this function returns the number of bytes
 * successfully transferred, 0 on end-of-file conditions, and a negative error
 * code if no bytes could be transferred due to an error.  Dirty data is not
 * flushed immediately, and thus, a successful write only indicates that the
 * data have been taken in by the cache (for immediate I/O, a character device
 * would have to be used, but MINIX3 no longer supports this), which may be
 * followed later by silent failures, including undetected end-of-file cases.
 * In particular, write requests may or may not return 0 (EOF) immediately when
 * writing at or beyond the block device's size. Since block I/O takes place
 * at block granularity, block-unaligned writes have to read a block from disk
 * before updating it, and that is the only possible source of actual I/O
 * errors for write calls.
 * TODO: reconsider the buffering-only approach, or see if we can at least
 * somehow throw accurate EOF errors without reading in each block first.
 */
ssize_t
lmfs_bio(dev_t dev, struct fsdriver_data * data, size_t bytes, off_t pos,
	int call)
{
	block_t block, blocks_left;
	size_t block_size, off, block_off, chunk;
	struct buf *bp;
	int r, write, how;

	if (dev == NO_DEV)
		return EINVAL;

	block_size = lmfs_fs_block_size();
	write = (call == FSC_WRITE);

	assert(block_size > 0);

	/* FIXME: block_t is 32-bit, so we have to impose a limit here. */
	if (pos < 0 || pos / block_size > UINT32_MAX || bytes > SSIZE_MAX)
		return EINVAL;

	off = 0;
	block = pos / block_size;
	block_off = (size_t)(pos % block_size);
	blocks_left = howmany(block_off + bytes, block_size);

	lmfs_reset_rdwt_err();
	r = OK;

	for (off = 0; off < bytes; off += chunk) {
		chunk = block_size - block_off;
		if (chunk > bytes - off)
			chunk = bytes - off;

		/*
		 * For read requests, help the block driver form larger I/O
		 * requests.
		 */
		if (!write)
			block_prefetch(dev, block, blocks_left);

		/*
		 * Do not read the block from disk if we will end up
		 * overwriting all of its contents.
		 */
		how = (write && chunk == block_size) ? NO_READ : NORMAL;

		bp = lmfs_get_block(dev, block, how);
		assert(bp);

		r = lmfs_rdwt_err();

		if (r == OK && data != NULL) {
			assert(lmfs_dev(bp) != NO_DEV);

			if (write) {
				r = fsdriver_copyin(data, off,
				    (char *)bp->data + block_off, chunk);

				/*
				 * Mark the block as dirty even if the copy
				 * failed, since the copy may in fact have
				 * succeeded partially.  This is an interface
				 * issue that should be resolved at some point,
				 * but for now we do not want the cache to be
				 * desynchronized from the disk contents.
				 */
				lmfs_markdirty(bp);
			} else
				r = fsdriver_copyout(data, off,
				    (char *)bp->data + block_off, chunk);
		}

		lmfs_put_block(bp, FULL_DATA_BLOCK);

		if (r != OK)
			break;

		block++;
		block_off = 0;
		blocks_left--;
	}

	/*
	 * If we were not able to do any I/O, return the error (or EOF, even
	 * for writes).  Otherwise, return how many bytes we did manage to
	 * transfer.
	 */
	if (r != OK && off == 0)
		return (r == END_OF_FILE) ? 0 : r;

	return off;
}
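A hedged sketch of how the three call types map onto lmfs_bio(), following the convention in the comment above ("data" is NULL only for peek calls); the wrapper names are illustrative, not part of libminixfs:

/* Hypothetical wrappers around lmfs_bio() for read and peek. */
static ssize_t bio_read(dev_t dev, struct fsdriver_data *data, size_t bytes,
	off_t pos)
{
	return lmfs_bio(dev, data, bytes, pos, FSC_READ);
}

static ssize_t bio_peek(dev_t dev, size_t bytes, off_t pos)
{
	/* Peeking populates the cache; no user buffer is involved. */
	return lmfs_bio(dev, NULL, bytes, pos, FSC_PEEK);
}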
Example #8
File: susp.c Project: Hooman3/minix
int parse_susp(struct rrii_dir_record *dir, char *buffer)
{
	/* Parse fundamental SUSP entries */
	char susp_signature[2];
	u8_t susp_length;
	u8_t susp_version;

	u32_t ca_block_nr;
	u32_t ca_offset;
	u32_t ca_length;
	struct buf *ca_bp;

	susp_signature[0] = buffer[0];
	susp_signature[1] = buffer[1];
	susp_length = *((u8_t*)buffer + 2);
	susp_version = *((u8_t*)buffer + 3);

	if ((susp_signature[0] == 'C') && (susp_signature[1] == 'E') &&
	    (susp_length >= 28) && (susp_version >= 1)) {
		/*
		 * Continuation area, perform a recursion.
		 *
		 * FIXME: Currently we're parsing only first logical block of a
		 * continuation area, and infinite recursion is not checked.
		 */

		ca_block_nr = *((u32_t*)(buffer + 4));
		ca_offset = *((u32_t*)(buffer + 12));
		ca_length = *((u32_t*)(buffer + 20));

		/* Truncate continuation area to fit one logical block. */
		if (ca_offset >= v_pri.logical_block_size_l) {
			return EINVAL;
		}
		if (ca_offset + ca_length > v_pri.logical_block_size_l) {
			ca_length = v_pri.logical_block_size_l - ca_offset;
		}

		ca_bp = lmfs_get_block(fs_dev, ca_block_nr, NORMAL);
		if (ca_bp == NULL) {
			return EINVAL;
		}

		parse_susp_buffer(dir, b_data(ca_bp) + ca_offset, ca_length);
		lmfs_put_block(ca_bp, FULL_DATA_BLOCK);

		return OK;
	}
	else if ((susp_signature[0] == 'P') && (susp_signature[1] == 'D')) {
		/* Padding, skip. */
		return OK;
	}
	else if ((susp_signature[0] == 'S') && (susp_signature[1] == 'P')) {
		/* Ignored, skip. */
		return OK;
	}
	else if ((susp_signature[0] == 'S') && (susp_signature[1] == 'T')) {
		/* Terminator entry, stop processing. */
		return ECANCELED;
	}
	else if ((susp_signature[0] == 'E') && (susp_signature[1] == 'R')) {
		/* Ignored, skip. */
		return OK;
	}
	else if ((susp_signature[0] == 'E') && (susp_signature[1] == 'S')) {
		/* Ignored, skip. */
		return OK;
	}

	/* Not a SUSP fundamental entry. */
	return EINVAL;
}
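parse_susp() handles a single entry; the caller (parse_susp_buffer(), referenced in the CE branch above) is expected to walk consecutive entries. A hedged sketch of such a walk, assuming only the signature/length layout shown above; the project's real parse_susp_buffer() may differ:

/* Sketch: iterate SUSP entries in a buffer until an ST terminator. */
static void walk_susp_entries(struct rrii_dir_record *dir, char *buf,
	u32_t length)
{
	u8_t entry_length;

	while (length >= 4) {	/* signature(2) + length(1) + version(1) */
		entry_length = *((u8_t*)buf + 2);
		if (entry_length < 4 || entry_length > length)
			break;	/* malformed entry, stop */

		if (parse_susp(dir, buf) == ECANCELED)
			break;	/* ST terminator entry */

		buf += entry_length;
		length -= entry_length;
	}
}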