void parse_susp_rock_ridge_plcl(struct rrii_dir_record *dir, u32_t block) {
	struct inode *rep_inode;
	struct buf *bp;
	struct iso9660_dir_record *dir_rec;
	struct dir_extent extent;
	struct inode_dir_entry dummy_dir_entry;
	size_t dummy_offset = 0;

	/* Check if inode wasn't already parsed. */
	rep_inode = inode_cache_get(block);
	if (rep_inode != NULL) {
		rep_inode->i_refcount++;
		dir->reparented_inode = rep_inode;
		return;
	}

	/* Peek ahead to build extent for read_inode. */
	if (lmfs_get_block(&bp, fs_dev, block, NORMAL) != OK)
		return;

	dir_rec = (struct iso9660_dir_record*)b_data(bp);

	extent.location = block;
	extent.length = dir_rec->data_length_l / v_pri.logical_block_size_l;
	if (dir_rec->data_length_l % v_pri.logical_block_size_l)
		extent.length++;
	extent.next = NULL;
	lmfs_put_block(bp);

	memset(&dummy_dir_entry, 0, sizeof(struct inode_dir_entry));
	read_inode(&dummy_dir_entry, &extent, &dummy_offset);
	free(dummy_dir_entry.r_name);
	dir->reparented_inode = dummy_dir_entry.i_node;
}
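
The extent length computed above rounds the directory's data length up to whole logical blocks; the same round-up reappears in read_inode_extents() further down, and elsewhere in these listings as the howmany() macro. Purely as an illustration, the idiom could be factored into a helper like this (bytes_to_blocks is not part of the isofs sources):

/* Illustrative helper only: round a byte count up to whole logical blocks. */
static u32_t
bytes_to_blocks(u32_t bytes)
{
	u32_t blocks = bytes / v_pri.logical_block_size_l;

	if (bytes % v_pri.logical_block_size_l)
		blocks++;

	return blocks;
}
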
Example #2
static struct buf* fetch_inode(struct dir_extent *extent, size_t *offset)
{
	struct iso9660_dir_record *dir_rec;
	struct buf *bp;

	/*
	 * Directory entries aren't allowed to cross a logical block boundary in
	 * ISO 9660, so we keep searching until we find something or reach the
	 * end of the extent.
	 */
	bp = read_extent_block(extent, *offset / v_pri.logical_block_size_l);
	while (bp != NULL) {
		dir_rec = (struct iso9660_dir_record*)(b_data(bp) + *offset %
		          v_pri.logical_block_size_l);
		if (dir_rec->length == 0) {
			*offset -= *offset % v_pri.logical_block_size_l;
			*offset += v_pri.logical_block_size_l;
		}
		else {
			break;
		}

		lmfs_put_block(bp, FULL_DATA_BLOCK);
		bp = read_extent_block(extent, *offset /
		    v_pri.logical_block_size_l);
	}

	return bp;
}
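
To see the skip arithmetic at work: with the usual 2048-byte logical blocks, an offset of 2100 that lands on a zero-length record is first reduced by 2100 % 2048 = 52 back to 2048 and then advanced by one block to 4096, i.e. to the start of the next logical block, where the search continues.
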
Example #3
int read_inode(struct inode *i_node, struct dir_extent *extent, size_t offset,
	size_t *new_offset)
{
	struct iso9660_dir_record *dir_rec;
	struct buf *bp;

	/* Find inode. */
	bp = fetch_inode(extent, &offset);
	if (bp == NULL)
		return EOF;

	dir_rec = (struct iso9660_dir_record*)(b_data(bp) + offset %
	          v_pri.logical_block_size_l);

	/* Parse basic ISO 9660 specs. */
	if (check_dir_record(dir_rec,
	    offset % v_pri.logical_block_size_l) != OK) {
		lmfs_put_block(bp, FULL_DATA_BLOCK);
		return EINVAL;
	}

	memset(&i_node->i_stat, 0, sizeof(struct stat));

	i_node->i_stat.st_ino = get_extent_absolute_block_id(extent,
	    offset / v_pri.logical_block_size_l) * v_pri.logical_block_size_l +
	    offset % v_pri.logical_block_size_l;

	read_inode_iso9660(i_node, dir_rec);

	/* Parse extensions. */
	read_inode_susp(i_node, dir_rec, bp,
	    offset % v_pri.logical_block_size_l);

	offset += dir_rec->length;
	read_inode_extents(i_node, dir_rec, extent, &offset);

	lmfs_put_block(bp, FULL_DATA_BLOCK);
	if (new_offset != NULL)
		*new_offset = offset;
	return OK;
}
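
Note that the st_ino value computed above is the directory record's absolute block number times the logical block size plus the record's offset within that block; in other words, the on-volume byte position of the directory record serves as the inode number.
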
Example #4
int
readblock(int b, int blocksize, u32_t seed, char *data)
{
	struct buf *bp;

	assert(blocksize == curblocksize);

	if(!(bp = lmfs_get_block(MYDEV, b, NORMAL))) {
		e(30);
		return 0;
	}

	memcpy(data, bp->data, blocksize);

	lmfs_put_block(bp, FULL_DATA_BLOCK);

	return blocksize;
}
int
readblock(int b, int blocksize, u32_t seed, char *data)
{
	struct buf *bp;
	int r;

	assert(blocksize == curblocksize);

	if ((r = lmfs_get_block(&bp, MYDEV, b, NORMAL)) != 0) {
		e(30);
		return 0;
	}

	memcpy(data, bp->data, blocksize);

	lmfs_put_block(bp);

	return blocksize;
}
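
The two readblock() listings above contrast the older lmfs_get_block() call, which returns the buffer pointer directly and whose matching lmfs_put_block() takes a block-type argument, with the newer variant that returns a status code, takes the buffer pointer by reference, and is released with a one-argument lmfs_put_block(). A hypothetical write-side counterpart in the newer style might look like the sketch below; MYDEV, curblocksize and e() are assumed to come from the same test harness, and lmfs_markdirty() is used as in lmfs_bio() further down.

int
writeblock(int b, int blocksize, u32_t seed, char *data)
{
	struct buf *bp;
	int r;

	assert(blocksize == curblocksize);

	/* Acquire the block through the cache; NORMAL reads it in first.
	 * (seed is unused here, as in readblock().)
	 */
	if ((r = lmfs_get_block(&bp, MYDEV, b, NORMAL)) != 0) {
		e(30);
		return 0;
	}

	/* Overwrite the cached copy and mark it dirty; the cache flushes the
	 * block to the device later.
	 */
	memcpy(bp->data, data, blocksize);
	lmfs_markdirty(bp);

	lmfs_put_block(bp);

	return blocksize;
}

Since the whole block is overwritten, the NO_READ hint used by lmfs_bio() below could avoid the initial read; NORMAL is used here only to keep the sketch as close to readblock() as possible.
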
Example #6
/*
 * Prefetch up to "nblocks" blocks on "dev" starting from block number "block".
 * Stop early when either the I/O request fills up or when a block is already
 * found to be in the cache.  The latter is likely to happen often, since this
 * function is called before getting each block for reading.  Prefetching is a
 * strictly best-effort operation, and may fail silently.
 * TODO: limit according to the number of available buffers.
 */
static void
block_prefetch(dev_t dev, block_t block, block_t nblocks)
{
	struct buf *bp, *bufs[NR_IOREQS];
	unsigned int count;

	for (count = 0; count < nblocks; count++) {
		bp = lmfs_get_block(dev, block + count, PREFETCH);
		assert(bp != NULL);

		if (lmfs_dev(bp) != NO_DEV) {
			lmfs_put_block(bp, FULL_DATA_BLOCK);

			break;
		}

		bufs[count] = bp;
	}

	if (count > 0)
		lmfs_rw_scattered(dev, bufs, count, READING);
}
Example #7
/*
 * Perform block I/O, on "dev", starting from offset "pos", for a total of
 * "bytes" bytes.  Reading, writing, and peeking are highly similar, and thus,
 * this function implements all of them.  The "call" parameter indicates the
 * call type (one of FSC_READ, FSC_WRITE, FSC_PEEK).  For read and write calls,
 * "data" will identify the user buffer to use; for peek calls, "data" is set
 * to NULL.  In all cases, this function returns the number of bytes
 * successfully transferred, 0 on end-of-file conditions, and a negative error
 * code if no bytes could be transferred due to an error.  Dirty data is not
 * flushed immediately, and thus, a successful write only indicates that the
 * data have been taken in by the cache (for immediate I/O, a character device
 * would have to be used, but MINIX3 no longer supports this), which may be
 * followed later by silent failures, including undetected end-of-file cases.
 * In particular, write requests may or may not return 0 (EOF) immediately when
 * writing at or beyond the block device's size.  Since block I/O takes place
 * at block granularity, block-unaligned writes have to read a block from disk
 * before updating it, and that is the only possible source of actual I/O
 * errors for write calls.
 * TODO: reconsider the buffering-only approach, or see if we can at least
 * somehow throw accurate EOF errors without reading in each block first.
 */
ssize_t
lmfs_bio(dev_t dev, struct fsdriver_data * data, size_t bytes, off_t pos,
	int call)
{
	block_t block, blocks_left;
	size_t block_size, off, block_off, chunk;
	struct buf *bp;
	int r, write, how;

	if (dev == NO_DEV)
		return EINVAL;

	block_size = lmfs_fs_block_size();
	write = (call == FSC_WRITE);

	assert(block_size > 0);

	/* FIXME: block_t is 32-bit, so we have to impose a limit here. */
	if (pos < 0 || pos / block_size > UINT32_MAX || bytes > SSIZE_MAX)
		return EINVAL;

	off = 0;
	block = pos / block_size;
	block_off = (size_t)(pos % block_size);
	blocks_left = howmany(block_off + bytes, block_size);

	lmfs_reset_rdwt_err();
	r = OK;

	for (off = 0; off < bytes; off += chunk) {
		chunk = block_size - block_off;
		if (chunk > bytes - off)
			chunk = bytes - off;

		/*
		 * For read requests, help the block driver form larger I/O
		 * requests.
		 */
		if (!write)
			block_prefetch(dev, block, blocks_left);

		/*
		 * Do not read the block from disk if we will end up
		 * overwriting all of its contents.
		 */
		how = (write && chunk == block_size) ? NO_READ : NORMAL;

		bp = lmfs_get_block(dev, block, how);
		assert(bp);

		r = lmfs_rdwt_err();

		if (r == OK && data != NULL) {
			assert(lmfs_dev(bp) != NO_DEV);

			if (write) {
				r = fsdriver_copyin(data, off,
				    (char *)bp->data + block_off, chunk);

				/*
				 * Mark the block as dirty even if the copy
				 * failed, since the copy may in fact have
				 * succeeded partially.  This is an interface
				 * issue that should be resolved at some point,
				 * but for now we do not want the cache to be
				 * desynchronized from the disk contents.
				 */
				lmfs_markdirty(bp);
			} else
				r = fsdriver_copyout(data, off,
				    (char *)bp->data + block_off, chunk);
		}

		lmfs_put_block(bp, FULL_DATA_BLOCK);

		if (r != OK)
			break;

		block++;
		block_off = 0;
		blocks_left--;
	}

	/*
	 * If we were not able to do any I/O, return the error (or EOF, even
	 * for writes).  Otherwise, return how many bytes we did manage to
	 * transfer.
	 */
	if (r != OK && off == 0)
		return (r == END_OF_FILE) ? 0 : r;

	return off;
}
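
As the comment above lmfs_bio() explains, read, write, and peek requests share one code path, a peek being a read that only populates the cache. A minimal, hypothetical caller sketch (do_block_io and its arguments are made up for illustration; in practice the values come from the enclosing fsdriver request):

static ssize_t
do_block_io(dev_t dev, struct fsdriver_data *data, size_t bytes, off_t pos,
	int call)
{
	/* For FSC_PEEK no user buffer is involved; pass NULL as "data". */
	if (call == FSC_PEEK)
		data = NULL;

	/*
	 * Returns the number of bytes transferred, 0 on end-of-file, or a
	 * negative error code when nothing could be transferred.
	 */
	return lmfs_bio(dev, data, bytes, pos, call);
}
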
Example #8
ssize_t fs_read(ino_t ino_nr, struct fsdriver_data *data, size_t bytes,
	off_t pos, int __unused call)
{
	size_t off, chunk, block_size, cum_io;
	off_t f_size;
	struct inode *i_node;
	struct buf *bp;
	int r;

	/* Try to get inode according to its index. */
	if ((i_node = find_inode(ino_nr)) == NULL)
		return EINVAL; /* No inode found. */

	f_size = i_node->i_stat.st_size;
	if (pos >= f_size)
		return 0; /* EOF */

	/* Limit the request to the remainder of the file size. */
	if ((off_t)bytes > f_size - pos)
		bytes = (size_t)(f_size - pos);

	block_size = v_pri.logical_block_size_l;
	cum_io = 0;

	lmfs_reset_rdwt_err();
	r = OK;

	/* Split the transfer into chunks that don't span two blocks. */
	while (bytes > 0) {
		off = pos % block_size;

		chunk = block_size - off;
		if (chunk > bytes)
			chunk = bytes;

		/* Read 'chunk' bytes. */
		bp = read_extent_block(i_node->extent, pos / block_size);
		if (bp == NULL)
			panic("bp not valid in rw_chunk; this can't happen");

		r = fsdriver_copyout(data, cum_io, b_data(bp)+off, chunk);

		lmfs_put_block(bp, FULL_DATA_BLOCK);

		if (r != OK)
			break;  /* EOF reached. */
		if (lmfs_rdwt_err() < 0)
			break;

		/* Update counters and pointers. */
		bytes -= chunk;		/* Bytes yet to be read. */
		cum_io += chunk;	/* Bytes read so far. */
		pos += chunk;		/* Position within the file. */
	}

	if (lmfs_rdwt_err() != OK)
		r = lmfs_rdwt_err();	/* Check for disk error. */
	if (lmfs_rdwt_err() == END_OF_FILE)
		r = OK;

	return (r == OK) ? cum_io : r;
}
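
As a worked example of the chunking loop: a 5000-byte read starting at position 1000 with 2048-byte logical blocks is served as three chunks of 1048, 2048, and 1904 bytes, each taken from a single extent block, so no chunk ever spans a block boundary.
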
Example #9
/*===========================================================================*
 *				lmfs_rw_scattered			     *
 *===========================================================================*/
void lmfs_rw_scattered(
  dev_t dev,			/* major-minor device number */
  struct buf **bufq,		/* pointer to array of buffers */
  int bufqsize,			/* number of buffers */
  int rw_flag			/* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  int gap;
  register int i;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];
  off_t pos;
  int iov_per_block;
  int start_in_use = bufs_in_use, start_bufqsize = bufqsize;

  assert(bufqsize >= 0);
  if(bufqsize == 0) return;

  /* for READING, check all buffers on the list are obtained and held
   * (count > 0)
   */
  if (rw_flag == READING) {
	for(i = 0; i < bufqsize; i++) {
		assert(bufq[i] != NULL);
		assert(bufq[i]->lmfs_count > 0);
  	}

  	/* therefore they are all 'in use' and must be at least this many */
	  assert(start_in_use >= start_bufqsize);
  }

  assert(dev != NO_DEV);
  assert(fs_block_size > 0);
  iov_per_block = roundup(fs_block_size, PAGE_SIZE) / PAGE_SIZE;
  assert(iov_per_block < NR_IOREQS);
  
  /* (Shell) sort buffers on lmfs_blocknr. */
  gap = 1;
  do
	gap = 3 * gap + 1;
  while (gap <= bufqsize);
  while (gap != 1) {
  	int j;
	gap /= 3;
	for (j = gap; j < bufqsize; j++) {
		for (i = j - gap;
		     i >= 0 && bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
		     i -= gap) {
			bp = bufq[i];
			bufq[i] = bufq[i + gap];
			bufq[i + gap] = bp;
		}
	}
  }

  /* Set up I/O vector and do I/O.  The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
  	int nblocks = 0, niovecs = 0;
	int r;
	for (iop = iovec; nblocks < bufqsize; nblocks++) {
		int p;
		vir_bytes vdata, blockrem;
		bp = bufq[nblocks];
		if (bp->lmfs_blocknr != (block_t) bufq[0]->lmfs_blocknr + nblocks)
			break;
		if(niovecs >= NR_IOREQS-iov_per_block) break;
		vdata = (vir_bytes) bp->data;
		blockrem = fs_block_size;
		for(p = 0; p < iov_per_block; p++) {
			vir_bytes chunk = blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
			iop->iov_addr = vdata;
			iop->iov_size = chunk;
			vdata += PAGE_SIZE;
			blockrem -= chunk;
			iop++;
			niovecs++;
		}
		assert(p == iov_per_block);
		assert(blockrem == 0);
	}

	assert(nblocks > 0);
	assert(niovecs > 0);

	pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
	if (rw_flag == READING)
		r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
	else
		r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

	/* Harvest the results.  The driver may have returned an error, or it
	 * may have done less than what we asked for.
	 */
	if (r < 0) {
		printf("fs cache: I/O error %d on device %d/%d, block %u\n",
			r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
	}
	for (i = 0; i < nblocks; i++) {
		bp = bufq[i];
		if (r < (ssize_t) fs_block_size) {
			/* Transfer failed. */
			if (i == 0) {
				bp->lmfs_dev = NO_DEV;	/* Invalidate block */
			}
			break;
		}
		if (rw_flag == READING) {
			bp->lmfs_dev = dev;	/* validate block */
			lmfs_put_block(bp, PARTIAL_DATA_BLOCK);
		} else {
			MARKCLEAN(bp);
		}
		r -= fs_block_size;
	}

	bufq += i;
	bufqsize -= i;

	if (rw_flag == READING) {
		/* Don't bother reading more than the device is willing to
		 * give at this time.  Don't forget to release those extras.
		 */
		while (bufqsize > 0) {
			lmfs_put_block(*bufq++, PARTIAL_DATA_BLOCK);
			bufqsize--;
		}
	}
	if (rw_flag == WRITING && i == 0) {
		/* We're not making progress, this means we might keep
		 * looping. Buffers remain dirty if un-written. Buffers are
		 * lost if invalidate()d or LRU-removed while dirty. This
		 * is better than keeping unwritable blocks around forever..
		 */
		break;
	}
  }

  if(rw_flag == READING) {
  	assert(start_in_use >= start_bufqsize);

	/* READING callers assume all bufs are released. */
	assert(start_in_use - start_bufqsize == bufs_in_use);
  }
}
/*===========================================================================*
 *				rw_scattered				     *
 *===========================================================================*/
static void rw_scattered(
  dev_t dev,			/* major-minor device number */
  struct buf **bufq,		/* pointer to array of buffers */
  unsigned int bufqsize,	/* number of buffers */
  int rw_flag			/* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];
  off_t pos;
  unsigned int i, iov_per_block;
#if !defined(NDEBUG)
  unsigned int start_in_use = bufs_in_use, start_bufqsize = bufqsize;
#endif /* !defined(NDEBUG) */

  if(bufqsize == 0) return;

#if !defined(NDEBUG)
  /* for READING, check all buffers on the list are obtained and held
   * (count > 0)
   */
  if (rw_flag == READING) {
	assert(bufqsize <= LMFS_MAX_PREFETCH);

	for(i = 0; i < bufqsize; i++) {
		assert(bufq[i] != NULL);
		assert(bufq[i]->lmfs_count > 0);
  	}

  	/* therefore they are all 'in use' and must be at least this many */
	assert(start_in_use >= start_bufqsize);
  }

  assert(dev != NO_DEV);
  assert(fs_block_size > 0);
  assert(howmany(fs_block_size, PAGE_SIZE) <= NR_IOREQS);
#endif /* !defined(NDEBUG) */

  /* For WRITING, (Shell) sort buffers on lmfs_blocknr.
   * For READING, the buffers are already sorted.
   */
  if (rw_flag == WRITING)
	sort_blocks(bufq, bufqsize);

  /* Set up I/O vector and do I/O.  The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
	unsigned int p, nblocks = 0, niovecs = 0;
	int r;
	for (iop = iovec; nblocks < bufqsize; nblocks++) {
		vir_bytes vdata, blockrem;
		bp = bufq[nblocks];
		if (bp->lmfs_blocknr != bufq[0]->lmfs_blocknr + nblocks)
			break;
		blockrem = bp->lmfs_bytes;
		iov_per_block = howmany(blockrem, PAGE_SIZE);
		if (niovecs > NR_IOREQS - iov_per_block) break;
		vdata = (vir_bytes) bp->data;
		for(p = 0; p < iov_per_block; p++) {
			vir_bytes chunk =
			    blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
			iop->iov_addr = vdata;
			iop->iov_size = chunk;
			vdata += PAGE_SIZE;
			blockrem -= chunk;
			iop++;
			niovecs++;
		}
		assert(p == iov_per_block);
		assert(blockrem == 0);
	}

	assert(nblocks > 0);
	assert(niovecs > 0 && niovecs <= NR_IOREQS);

	pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
	if (rw_flag == READING)
		r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
	else
		r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

	/* Harvest the results.  The driver may have returned an error, or it
	 * may have done less than what we asked for.
	 */
	if (r < 0) {
		printf("fs cache: I/O error %d on device %d/%d, "
		    "block %"PRIu64"\n",
		    r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
	}
	for (i = 0; i < nblocks; i++) {
		bp = bufq[i];
		if (r < (ssize_t)bp->lmfs_bytes) {
			/* Transfer failed. */
			if (i == 0) {
				bp->lmfs_dev = NO_DEV;	/* Invalidate block */
			}
			break;
		}
		if (rw_flag == READING) {
			lmfs_put_block(bp);
		} else {
			MARKCLEAN(bp);
		}
		r -= bp->lmfs_bytes;
	}

	bufq += i;
	bufqsize -= i;

	if (rw_flag == READING) {
		/* Don't bother reading more than the device is willing to
		 * give at this time.  Don't forget to release those extras.
		 */
		while (bufqsize > 0) {
			bp = *bufq++;
			bp->lmfs_dev = NO_DEV;	/* invalidate block */
			lmfs_put_block(bp);
			bufqsize--;
		}
	}
	if (rw_flag == WRITING && i == 0) {
		/* We're not making progress, this means we might keep
		 * looping. Buffers remain dirty if un-written. Buffers are
		 * lost if invalidate()d or LRU-removed while dirty. This
		 * is better than keeping unwritable blocks around forever..
		 */
		break;
	}
  }

#if !defined(NDEBUG)
  if(rw_flag == READING) {
  	assert(start_in_use >= start_bufqsize);

	/* READING callers assume all bufs are released. */
	assert(start_in_use - start_bufqsize == bufs_in_use);
  }
#endif /* !defined(NDEBUG) */
}
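
The newer rw_scattered() delegates the WRITING-case ordering to a sort_blocks() helper that is not part of this listing. A plausible sketch, assuming it is simply the Shell sort on lmfs_blocknr that the older lmfs_rw_scattered() above carried inline:

/* Sketch only: Shell sort the buffer queue by block number, as the older
 * lmfs_rw_scattered() did inline; the real sort_blocks() is not shown here.
 */
static void
sort_blocks(struct buf **bufq, unsigned int bufqsize)
{
	struct buf *bp;
	int gap, i, j;

	gap = 1;
	do
		gap = 3 * gap + 1;
	while ((unsigned int)gap <= bufqsize);

	while (gap != 1) {
		gap /= 3;
		for (j = gap; (unsigned int)j < bufqsize; j++) {
			for (i = j - gap; i >= 0 &&
			    bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
			    i -= gap) {
				bp = bufq[i];
				bufq[i] = bufq[i + gap];
				bufq[i + gap] = bp;
			}
		}
	}
}
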
Example #11
void read_inode_extents(struct inode *i,
	const struct iso9660_dir_record *dir_rec,
	struct dir_extent *extent, size_t *offset)
{
	struct buf *bp;
	struct iso9660_dir_record *extent_rec;
	struct dir_extent *cur_extent = i->extent;
	int done = FALSE;

	/*
	 * No need to search for more extents if the file is empty or if this
	 * directory record is already marked as the last extent.
	 */
	if (cur_extent == NULL ||
	    ((dir_rec->file_flags & D_NOT_LAST_EXTENT) == 0))
		return;

	while (!done) {
		bp = fetch_inode(extent, offset);
		if (bp == NULL)
			return;

		/* fetch_inode() already fetched the block for this offset. */
		extent_rec = (struct iso9660_dir_record*)(b_data(bp) +
		    *offset % v_pri.logical_block_size_l);

		if (check_dir_record(extent_rec,
		    *offset % v_pri.logical_block_size_l) != OK) {
			lmfs_put_block(bp, FULL_DATA_BLOCK);
			return;
		}

		/* Extent entries should share the same name. */
		if ((dir_rec->length_file_id == extent_rec->length_file_id) &&
		    (memcmp(dir_rec->file_id, extent_rec->file_id,
		    dir_rec->length_file_id) == 0)) {
			/* Add the extent at the end of the linked list. */
			assert(cur_extent->next == NULL);
			cur_extent->next = alloc_extent();
			cur_extent->next->location = extent_rec->loc_extent_l +
			    extent_rec->ext_attr_rec_length;
			cur_extent->next->length = extent_rec->data_length_l /
			    v_pri.logical_block_size_l;
			if (extent_rec->data_length_l % v_pri.logical_block_size_l)
				cur_extent->next->length++;

			i->i_stat.st_size += extent_rec->data_length_l;
			i->i_stat.st_blocks += cur_extent->next->length;

			cur_extent = cur_extent->next;
			*offset += extent_rec->length;
		}
		else
			done = TRUE;

		/* Stop once the 'not last extent' flag is clear, i.e. this
		 * was the last extent of the file.
		 */
		if ((extent_rec->file_flags & D_NOT_LAST_EXTENT) == 0)
			done = TRUE;

		lmfs_put_block(bp, FULL_DATA_BLOCK);
	}
}
Example #12
int parse_susp(struct rrii_dir_record *dir, char *buffer)
{
	/* Parse fundamental SUSP entries */
	char susp_signature[2];
	u8_t susp_length;
	u8_t susp_version;

	u32_t ca_block_nr;
	u32_t ca_offset;
	u32_t ca_length;
	struct buf *ca_bp;

	susp_signature[0] = buffer[0];
	susp_signature[1] = buffer[1];
	susp_length = *((u8_t*)buffer + 2);
	susp_version = *((u8_t*)buffer + 3);

	if ((susp_signature[0] == 'C') && (susp_signature[1] == 'E') &&
	    (susp_length >= 28) && (susp_version >= 1)) {
		/*
		 * Continuation area, perform a recursion.
		 *
		 * FIXME: Currently we're parsing only first logical block of a
		 * continuation area, and infinite recursion is not checked.
		 */

		ca_block_nr = *((u32_t*)(buffer + 4));
		ca_offset = *((u32_t*)(buffer + 12));
		ca_length = *((u32_t*)(buffer + 20));

		/* Truncate continuation area to fit one logical block. */
		if (ca_offset >= v_pri.logical_block_size_l) {
			return EINVAL;
		}
		if (ca_offset + ca_length > v_pri.logical_block_size_l) {
			ca_length = v_pri.logical_block_size_l - ca_offset;
		}

		ca_bp = lmfs_get_block(fs_dev, ca_block_nr, NORMAL);
		if (ca_bp == NULL) {
			return EINVAL;
		}

		parse_susp_buffer(dir, b_data(ca_bp) + ca_offset, ca_length);
		lmfs_put_block(ca_bp, FULL_DATA_BLOCK);

		return OK;
	}
	else if ((susp_signature[0] == 'P') && (susp_signature[1] == 'D')) {
		/* Padding, skip. */
		return OK;
	}
	else if ((susp_signature[0] == 'S') && (susp_signature[1] == 'P')) {
		/* Ignored, skip. */
		return OK;
	}
	else if ((susp_signature[0] == 'S') && (susp_signature[1] == 'T')) {
		/* Terminator entry, stop processing. */
		return(ECANCELED);
	}
	else if ((susp_signature[0] == 'E') && (susp_signature[1] == 'R')) {
		/* Ignored, skip. */
		return OK;
	}
	else if ((susp_signature[0] == 'E') && (susp_signature[1] == 'S')) {
		/* Ignored, skip. */
		return OK;
	}

	/* Not a SUSP fundamental entry. */
	return EINVAL;
}
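
parse_susp() handles a single entry and reports the ST terminator via ECANCELED; the parse_susp_buffer() caller seen in the CE branch above is not part of this listing. A hedged sketch of how such a walk over a System Use area could look (walk_susp_area is a made-up name; the real parse_susp_buffer() also dispatches the Rock Ridge entries):

static void
walk_susp_area(struct rrii_dir_record *dir, char *buffer, u32_t size)
{
	u8_t entry_length;

	/* Every SUSP entry starts with signature, length and version bytes. */
	while (size >= 4) {
		entry_length = *((u8_t*)buffer + 2);

		/* A zero or truncated length would make no progress; stop. */
		if (entry_length < 4 || entry_length > size)
			return;

		/* Stop at the ST terminator; other results are ignored here. */
		if (parse_susp(dir, buffer) == ECANCELED)
			return;

		buffer += entry_length;
		size -= entry_length;
	}
}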