Code example #1
File: xfs_dir2_block.c  Project: Addision/LVS
/*
 * Convert the shortform directory to block form.
 */
int						/* error */
xfs_dir2_sf_to_block(
	xfs_da_args_t		*args)		/* operation arguments */
{
	xfs_dir2_db_t		blkno;		/* dir-relative block # (0) */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
	char			*buf;		/* sf buffer */
	int			buf_len;
	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
	xfs_inode_t		*dp;		/* incore directory inode */
	int			dummy;		/* trash */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	int			endoffset;	/* end of data objects */
	int			error;		/* error return value */
	int			i;		/* index */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to scan block freespc */
	int			newoffset;	/* offset from current entry */
	int			offset;		/* target block offset */
	xfs_dir2_sf_entry_t	*sfep;		/* sf entry pointer */
	xfs_dir2_sf_t		*sfp;		/* shortform structure */
	__be16			*tagp;		/* end of data entry */
	xfs_trans_t		*tp;		/* transaction pointer */
	struct xfs_name		name;

	trace_xfs_dir2_sf_to_block(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
	/*
	 * Bomb out if the shortform directory is way too short.
	 */
	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return XFS_ERROR(EIO);
	}
	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
	ASSERT(dp->i_df.if_u1.if_data != NULL);
	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
	ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
	/*
	 * Copy the directory into a temporary buffer.
	 * Then pitch the incore inode data so we can make extents.
	 */

	buf_len = dp->i_df.if_bytes;
	buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);

	memcpy(buf, sfp, dp->i_df.if_bytes);
	xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
	dp->i_d.di_size = 0;
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	/*
	 * Reset pointer - old sfp is gone.
	 */
	sfp = (xfs_dir2_sf_t *)buf;
	/*
	 * Add block 0 to the inode.
	 */
	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
	if (error) {
		kmem_free(buf);
		return error;
	}
	/*
	 * Initialize the data block.
	 */
	error = xfs_dir2_data_init(args, blkno, &bp);
	if (error) {
		kmem_free(buf);
		return error;
	}
	block = bp->data;
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	/*
	 * Compute size of block "tail" area.
	 */
	i = (uint)sizeof(*btp) +
	    (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
	/*
	 * The whole thing is initialized to free by the init routine.
	 * Say we're using the leaf and tail area.
	 */
	dup = (xfs_dir2_data_unused_t *)block->u;
	needlog = needscan = 0;
	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
		&needscan);
	ASSERT(needscan == 0);
	/*
	 * Fill in the tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(sfp->hdr.count + 2);	/* ., .. */
	btp->stale = 0;
	blp = xfs_dir2_block_leaf_p(btp);
	endoffset = (uint)((char *)blp - (char *)block);
	/*
	 * Remove the freespace, we'll manage it.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		be16_to_cpu(dup->length), &needlog, &needscan);
	/*
	 * Create entry for .
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
	dep->inumber = cpu_to_be64(dp->i_ino);
	dep->namelen = 1;
	dep->name[0] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
	blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	/*
	 * Create entry for ..
	 */
	dep = (xfs_dir2_data_entry_t *)
		((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
	dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
	dep->namelen = 2;
	dep->name[0] = dep->name[1] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
	blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	offset = XFS_DIR2_DATA_FIRST_OFFSET;
	/*
	 * Loop over existing entries, stuff them in.
	 */
	if ((i = 0) == sfp->hdr.count)
		sfep = NULL;
	else
		sfep = xfs_dir2_sf_firstentry(sfp);
	/*
	 * Need to preserve the existing offset values in the sf directory.
	 * Insert holes (unused entries) where necessary.
	 */
	while (offset < endoffset) {
		/*
		 * sfep is null when we reach the end of the list.
		 */
		if (sfep == NULL)
			newoffset = endoffset;
		else
			newoffset = xfs_dir2_sf_get_offset(sfep);
		/*
		 * There should be a hole here, make one.
		 */
		if (offset < newoffset) {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + offset);
			dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			dup->length = cpu_to_be16(newoffset - offset);
			*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
				((char *)dup - (char *)block));
			xfs_dir2_data_log_unused(tp, bp, dup);
			(void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
				dup, &dummy);
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/*
		 * Copy a real entry.
		 */
		dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
		dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
				xfs_dir2_sf_inumberp(sfep)));
		dep->namelen = sfep->namelen;
		memcpy(dep->name, sfep->name, dep->namelen);
		tagp = xfs_dir2_data_entry_tag_p(dep);
		*tagp = cpu_to_be16((char *)dep - (char *)block);
		xfs_dir2_data_log_entry(tp, bp, dep);
		name.name = sfep->name;
		name.len = sfep->namelen;
		blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
							hashname(&name));
		blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
						 (char *)dep - (char *)block));
		offset = (int)((char *)(tagp + 1) - (char *)block);
		if (++i == sfp->hdr.count)
			sfep = NULL;
		else
			sfep = xfs_dir2_sf_nextentry(sfp, sfep);
	}
	/* Done with the temporary buffer */
	kmem_free(buf);
	/*
	 * Sort the leaf entries by hash value.
	 */
	xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort);
	/*
	 * Log the leaf entry area and tail.
	 * Already logged the header in data_init, ignore needlog.
	 */
	ASSERT(needscan == 0);
	xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
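
A note on the pattern above: every directory data entry ends with a big-endian 16-bit "tag" holding the entry's offset from the start of the block, which is what the *tagp assignments maintain and what lets the block be walked backwards from its tail. A minimal userspace sketch of the idea follows; the layout and offsets are illustrative, not the on-disk XFS format, and htobe16/be16toh are glibc/BSD extensions.

#include <assert.h>
#include <endian.h>	/* htobe16/be16toh: glibc/BSD extensions */
#include <stdint.h>
#include <string.h>

/* Store the entry's own offset in its last two bytes, big-endian. */
static void set_tag(uint8_t *block, size_t entry_off, size_t entry_len)
{
	uint16_t tag = htobe16((uint16_t)entry_off);

	memcpy(block + entry_off + entry_len - sizeof(tag), &tag, sizeof(tag));
}

/* Given the offset just past an entry, recover where that entry starts. */
static size_t entry_start_before(const uint8_t *block, size_t end_off)
{
	uint16_t tag;

	memcpy(&tag, block + end_off - sizeof(tag), sizeof(tag));
	return be16toh(tag);
}

int main(void)
{
	uint8_t block[4096] = { 0 };

	set_tag(block, 64, 32);			/* 32-byte entry at offset 64 */
	assert(entry_start_before(block, 96) == 64);
	return 0;
}
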
Code example #2
File: agheader.c  Project: Claruarius/stblinux-2.6.37
static int
verify_set_agf(xfs_mount_t *mp, xfs_agf_t *agf, xfs_agnumber_t i)
{
	xfs_drfsbno_t agblocks;
	int retval = 0;

	/* check common fields */

	if (be32_to_cpu(agf->agf_magicnum) != XFS_AGF_MAGIC)  {
		retval = XR_AG_AGF;
		do_warn(_("bad magic # 0x%x for agf %d\n"),
			be32_to_cpu(agf->agf_magicnum), i);

		if (!no_modify)
			agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	}

	if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))  {
		retval = XR_AG_AGF;
		do_warn(_("bad version # %d for agf %d\n"),
			be32_to_cpu(agf->agf_versionnum), i);

		if (!no_modify)
			agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	}

	if (be32_to_cpu(agf->agf_seqno) != i)  {
		retval = XR_AG_AGF;
		do_warn(_("bad sequence # %d for agf %d\n"),
			be32_to_cpu(agf->agf_seqno), i);

		if (!no_modify)
			agf->agf_seqno = cpu_to_be32(i);
	}

	if (be32_to_cpu(agf->agf_length) != mp->m_sb.sb_agblocks)  {
		if (i != mp->m_sb.sb_agcount - 1)  {
			retval = XR_AG_AGF;
			do_warn(_("bad length %d for agf %d, should be %d\n"),
				be32_to_cpu(agf->agf_length), i,
				mp->m_sb.sb_agblocks);
			if (!no_modify)
				agf->agf_length = 
					cpu_to_be32(mp->m_sb.sb_agblocks);
		} else  {
			agblocks = mp->m_sb.sb_dblocks -
				(xfs_drfsbno_t) mp->m_sb.sb_agblocks * i;

			if (be32_to_cpu(agf->agf_length) != agblocks)  {
				retval = XR_AG_AGF;
				do_warn(
	_("bad length %d for agf %d, should be %" PRIu64 "\n"),
					be32_to_cpu(agf->agf_length),
						i, agblocks);
				if (!no_modify)
					agf->agf_length = cpu_to_be32(agblocks);
			}
		}
	}

	/*
	 * check first/last AGF fields.  if need be, lose the free
	 * space in the AGFL, we'll reclaim it later.
	 */
	if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp))  {
		do_warn(_("flfirst %d in agf %d too large (max = %zu)\n"),
			be32_to_cpu(agf->agf_flfirst),
			i, XFS_AGFL_SIZE(mp));
		if (!no_modify)
			agf->agf_flfirst = cpu_to_be32(0);
	}

	if (be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp))  {
		do_warn(_("fllast %d in agf %d too large (max = %zu)\n"),
			be32_to_cpu(agf->agf_fllast),
			i, XFS_AGFL_SIZE(mp));
		if (!no_modify)
			agf->agf_fllast = cpu_to_be32(0);
	}

	/* don't check freespace btrees -- will be checked by caller */

	return(retval);
}
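
The function above is a clean example of repair-capable validation: each field is checked independently, every mismatch is warned about and recorded in retval, and the field is only rewritten (in big-endian form) when no_modify is clear. Below is a hedged stand-alone sketch of the same shape; the header layout and magic value are invented for illustration, not the real AGF.

#include <arpa/inet.h>	/* htonl/ntohl as be32 helpers */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAGIC 0x44454d4fu		/* invented magic, not XFS_AGF_MAGIC */

struct demo_hdr {
	uint32_t magic;			/* stored big-endian on disk */
	uint32_t seqno;			/* stored big-endian on disk */
};

static int verify_set_demo(struct demo_hdr *h, uint32_t want_seq, int no_modify)
{
	int bad = 0;

	if (ntohl(h->magic) != DEMO_MAGIC) {
		bad = 1;
		fprintf(stderr, "bad magic # 0x%x\n", ntohl(h->magic));
		if (!no_modify)		/* repair only when allowed */
			h->magic = htonl(DEMO_MAGIC);
	}
	if (ntohl(h->seqno) != want_seq) {
		bad = 1;
		fprintf(stderr, "bad sequence # %u\n", ntohl(h->seqno));
		if (!no_modify)
			h->seqno = htonl(want_seq);
	}
	return bad;
}
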
Code example #3
File: dlmunlock.c  Project: CSCLOG/beaglebone
/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
 *          return value from dlmunlock_master
 */
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;
	enum dlm_status status = DLM_NORMAL;
	int found = 0, i;
	struct dlm_lockstatus *lksb = NULL;
	int ignore;
	u32 flags;
	struct list_head *queue;

	flags = be32_to_cpu(unlock->flags);

	if (flags & LKM_GET_LVB) {
		mlog(ML_ERROR, "bad args!  GET_LVB specified on unlock!\n");
		return DLM_BADARGS;
	}

	if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
		mlog(ML_ERROR, "bad args!  cannot modify lvb on a CANCEL "
		     "request!\n");
		return DLM_BADARGS;
	}

	if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
		return DLM_IVBUFLEN;
	}

	if (!dlm_grab(dlm))
		return DLM_REJECTED;

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");

	res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
	if (!res) {
		/* We assume here that a no lock resource simply means
		 * it was migrated away and destroyed before the other
		 * node could detect it. */
		mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
		status = DLM_FORWARD;
		goto not_found;
	}

	queue=&res->granted;
	found = 0;
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_RECOVERING\n");
		status = DLM_RECOVERING;
		goto leave;
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_MIGRATING\n");
		status = DLM_MIGRATING;
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_FORWARD -- not master\n");
		status = DLM_FORWARD;
		goto leave;
	}

	for (i=0; i<3; i++) {
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.cookie == unlock->cookie &&
		    	    lock->ml.node == unlock->node_idx) {
				dlm_lock_get(lock);
				found = 1;
				break;
			}
		}
		if (found)
			break;
		/* scan granted -> converting -> blocked queues */
		queue++;
	}
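
One subtlety in the queue scan at the end of this listing: it only works because the granted, converting, and blocked list heads sit next to each other in the resource structure, so `queue++` advances to the next list. A hedged sketch of the same three-queue lookup, using plain singly linked lists instead of the kernel's list_head:

#include <stddef.h>
#include <stdint.h>

struct demo_lock {
	uint64_t cookie;		/* lock identity, as in unlock->cookie */
	struct demo_lock *next;
};

struct demo_res {
	/* order matters: scanned granted -> converting -> blocked */
	struct demo_lock *queues[3];
};

static struct demo_lock *find_lock(struct demo_res *res, uint64_t cookie)
{
	for (int i = 0; i < 3; i++)
		for (struct demo_lock *l = res->queues[i]; l; l = l->next)
			if (l->cookie == cookie)
				return l;	/* caller takes a reference */
	return NULL;
}
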
Code example #4
File: tpm-interface.c  Project: AK101111/linux
int tpm_get_timeouts(struct tpm_chip *chip)
{
	struct tpm_cmd_t tpm_cmd;
	unsigned long new_timeout[4];
	unsigned long old_timeout[4];
	struct duration_t *duration_cap;
	ssize_t rc;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		/* Fixed timeouts for TPM2 */
		chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
		chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
		chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
		chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
		chip->duration[TPM_SHORT] =
		    msecs_to_jiffies(TPM2_DURATION_SHORT);
		chip->duration[TPM_MEDIUM] =
		    msecs_to_jiffies(TPM2_DURATION_MEDIUM);
		chip->duration[TPM_LONG] =
		    msecs_to_jiffies(TPM2_DURATION_LONG);
		return 0;
	}

	tpm_cmd.header.in = tpm_getcap_header;
	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
	tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
	tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
	rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);

	if (rc == TPM_ERR_INVALID_POSTINIT) {
		/* The TPM is not started, we are the first to talk to it.
		   Execute a startup command. */
		dev_info(&chip->dev, "Issuing TPM_STARTUP");
		if (tpm_startup(chip, TPM_ST_CLEAR))
			return rc;

		tpm_cmd.header.in = tpm_getcap_header;
		tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
		tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
		tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
		rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
				  NULL);
	}
	if (rc) {
		dev_err(&chip->dev,
			"A TPM error (%zd) occurred attempting to determine the timeouts\n",
			rc);
		goto duration;
	}

	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
	    be32_to_cpu(tpm_cmd.header.out.length)
	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
		return -EINVAL;

	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
	memcpy(new_timeout, old_timeout, sizeof(new_timeout));

	/*
	 * Provide ability for vendor overrides of timeout values in case
	 * of misreporting.
	 */
	if (chip->ops->update_timeouts != NULL)
		chip->timeout_adjusted =
			chip->ops->update_timeouts(chip, new_timeout);

	if (!chip->timeout_adjusted) {
		/* Don't overwrite default if value is 0 */
		if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
			int i;

			/* timeouts in msec rather than usec */
			for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
				new_timeout[i] *= 1000;
			chip->timeout_adjusted = true;
		}
	}

	/* Report adjusted timeouts */
	if (chip->timeout_adjusted) {
		dev_info(&chip->dev,
			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
			 old_timeout[0], new_timeout[0],
			 old_timeout[1], new_timeout[1],
			 old_timeout[2], new_timeout[2],
			 old_timeout[3], new_timeout[3]);
	}

	chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
	chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
	chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
	chip->timeout_d = usecs_to_jiffies(new_timeout[3]);

duration:
	tpm_cmd.header.in = tpm_getcap_header;
	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
	tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
	tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;

	rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
			      "attempting to determine the durations");
	if (rc)
		return rc;

	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
	    be32_to_cpu(tpm_cmd.header.out.length)
	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
		return -EINVAL;

	duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
	chip->duration[TPM_SHORT] =
	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
	chip->duration[TPM_MEDIUM] =
	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
	chip->duration[TPM_LONG] =
	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));

	/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
	 * value wrong and apparently reports msecs rather than usecs. So we
	 * fix up the resulting too-small TPM_SHORT value to make things work.
	 * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
	 */
	if (chip->duration[TPM_SHORT] < (HZ / 100)) {
		chip->duration[TPM_SHORT] = HZ;
		chip->duration[TPM_MEDIUM] *= 1000;
		chip->duration[TPM_LONG] *= 1000;
		chip->duration_adjusted = true;
		dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
	}
	return 0;
}
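
The millisecond/microsecond fix-up above is a plausibility heuristic: timeouts are specified in microseconds, so a nonzero value below 1000 almost certainly means the chip reported milliseconds, and all four values get scaled together. A hedged sketch of that rule in isolation (fixup_usec_timeouts is a hypothetical helper, not a kernel function):

#include <stdbool.h>
#include <stddef.h>

/* Returns true (and scales in place) when t[] looks misreported in msec.
 * Mirrors the shape of the check above, not the kernel's exact code. */
static bool fixup_usec_timeouts(unsigned long t[], size_t n)
{
	if (n == 0 || t[0] == 0 || t[0] >= 1000)
		return false;		/* zero or already plausible usec */
	for (size_t i = 0; i < n; i++)
		t[i] *= 1000;		/* msec -> usec */
	return true;			/* caller logs the adjustment */
}
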
Code example #5
int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
                          off_t *size, size_t *blocksize)
{
    char buf[256];
    uint64_t magic, s;
    uint16_t tmp;
    int rc;

    TRACE("Receiving negotiation.");

    socket_set_block(csock);
    rc = -EINVAL;

    if (read_sync(csock, buf, 8) != 8) {
        LOG("read failed");
        goto fail;
    }

    buf[8] = '\0';
    if (strlen(buf) == 0) {
        LOG("server connection closed");
        goto fail;
    }

    TRACE("Magic is %c%c%c%c%c%c%c%c",
          qemu_isprint(buf[0]) ? buf[0] : '.',
          qemu_isprint(buf[1]) ? buf[1] : '.',
          qemu_isprint(buf[2]) ? buf[2] : '.',
          qemu_isprint(buf[3]) ? buf[3] : '.',
          qemu_isprint(buf[4]) ? buf[4] : '.',
          qemu_isprint(buf[5]) ? buf[5] : '.',
          qemu_isprint(buf[6]) ? buf[6] : '.',
          qemu_isprint(buf[7]) ? buf[7] : '.');

    if (memcmp(buf, "NBDMAGIC", 8) != 0) {
        LOG("Invalid magic received");
        goto fail;
    }

    if (read_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) {
        LOG("read failed");
        goto fail;
    }
    magic = be64_to_cpu(magic);
    TRACE("Magic is 0x%" PRIx64, magic);

    if (name) {
        uint32_t reserved = 0;
        uint32_t opt;
        uint32_t namesize;

        TRACE("Checking magic (opts_magic)");
        if (magic != 0x49484156454F5054LL) {
            LOG("Bad magic received");
            goto fail;
        }
        if (read_sync(csock, &tmp, sizeof(tmp)) != sizeof(tmp)) {
            LOG("flags read failed");
            goto fail;
        }
        *flags = be16_to_cpu(tmp) << 16;
        /* reserved for future use */
        if (write_sync(csock, &reserved, sizeof(reserved)) !=
            sizeof(reserved)) {
            LOG("write failed (reserved)");
            goto fail;
        }
        /* write the export name */
        magic = cpu_to_be64(magic);
        if (write_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) {
            LOG("write failed (magic)");
            goto fail;
        }
        opt = cpu_to_be32(NBD_OPT_EXPORT_NAME);
        if (write_sync(csock, &opt, sizeof(opt)) != sizeof(opt)) {
            LOG("write failed (opt)");
            goto fail;
        }
        namesize = cpu_to_be32(strlen(name));
        if (write_sync(csock, &namesize, sizeof(namesize)) !=
            sizeof(namesize)) {
            LOG("write failed (namesize)");
            goto fail;
        }
        if (write_sync(csock, (char*)name, strlen(name)) != strlen(name)) {
            LOG("write failed (name)");
            goto fail;
        }
    } else {
        TRACE("Checking magic (cli_magic)");

        if (magic != 0x00420281861253LL) {
            LOG("Bad magic received");
            goto fail;
        }
    }

    if (read_sync(csock, &s, sizeof(s)) != sizeof(s)) {
        LOG("read failed");
        goto fail;
    }
    *size = be64_to_cpu(s);
    *blocksize = 1024;
    TRACE("Size is %" PRIu64, *size);

    if (!name) {
        if (read_sync(csock, flags, sizeof(*flags)) != sizeof(*flags)) {
            LOG("read failed (flags)");
            goto fail;
        }
        *flags = be32_to_cpup(flags);
    } else {
        if (read_sync(csock, &tmp, sizeof(tmp)) != sizeof(tmp)) {
            LOG("read failed (tmp)");
            goto fail;
        }
        *flags |= be32_to_cpu(tmp);
    }
    if (read_sync(csock, &buf, 124) != 124) {
        LOG("read failed (buf)");
        goto fail;
    }
    rc = 0;

fail:
    socket_set_nonblock(csock);
    return rc;
}
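
Everything in this negotiation leans on read_sync()/write_sync() transferring exactly the requested byte count, which a bare read()/write() does not guarantee. A hedged POSIX sketch of such a helper (read_full is a hypothetical name), looping over short reads and retrying on EINTR:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/* Read exactly len bytes unless an error or EOF intervenes; returns the
 * number of bytes actually read so callers can compare against len. */
static size_t read_full(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = read(fd, (char *)buf + done, len - done);

		if (n < 0 && errno == EINTR)
			continue;	/* interrupted: retry */
		if (n <= 0)
			break;		/* error or EOF: short result */
		done += (size_t)n;
	}
	return done;
}
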
Code example #6
static int mtdsplit_parse_tplink(struct mtd_info *master,
				struct mtd_partition **pparts,
				struct mtd_part_parser_data *data)
{
	struct tplink_fw_header hdr;
	size_t hdr_len, retlen, kernel_size;
	size_t rootfs_offset;
	struct mtd_partition *parts;
	int err;

	hdr_len = sizeof(hdr);
	err = mtd_read(master, 0, hdr_len, &retlen, (void *) &hdr);
	if (err)
		return err;

	if (retlen != hdr_len)
		return -EIO;

	switch (le32_to_cpu(hdr.version)) {
	case 1:
		if (be32_to_cpu(hdr.v1.kernel_ofs) != sizeof(hdr))
			return -EINVAL;

		kernel_size = sizeof(hdr) + be32_to_cpu(hdr.v1.kernel_len);
		break;
	case 2:
	case 3:
		if (be32_to_cpu(hdr.v2.kernel_ofs) != sizeof(hdr))
			return -EINVAL;

		kernel_size = sizeof(hdr) + be32_to_cpu(hdr.v2.kernel_len);
		break;
	default:
		return -EINVAL;
	}

	if (kernel_size > master->size)
		return -EINVAL;

	/* Find the rootfs after the kernel. */
	err = mtd_check_rootfs_magic(master, kernel_size, NULL);
	if (!err) {
		rootfs_offset = kernel_size;
	} else {
		/*
		 * The size in the header might cover the rootfs as well.
		 * Start the search from an arbitrary offset.
		 */
		err = mtd_find_rootfs_from(master, TPLINK_MIN_ROOTFS_OFFS,
					   master->size, &rootfs_offset, NULL);
		if (err)
			return err;
	}

	parts = kzalloc(TPLINK_NR_PARTS * sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	parts[0].name = KERNEL_PART_NAME;
	parts[0].offset = 0;
	parts[0].size = rootfs_offset;

	parts[1].name = ROOTFS_PART_NAME;
	parts[1].offset = rootfs_offset;
	parts[1].size = master->size - rootfs_offset;

	*pparts = parts;
	return TPLINK_NR_PARTS;
}
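
The parser above dispatches on a little-endian version field and then reads version-specific big-endian offsets. Since tplink_fw_header itself is not shown in this excerpt, the sketch below invents a comparable layout purely to illustrate the shape; le32toh/be32toh are glibc/BSD extensions.

#include <endian.h>
#include <stdint.h>

struct demo_hdr {
	uint32_t version;			/* little-endian on flash */
	union {
		struct { uint32_t kernel_ofs, kernel_len; } v1;
		struct { uint32_t pad, kernel_ofs, kernel_len; } v2;
	};					/* big-endian on flash */
};

/* Returns total kernel size (header + image) or -1 for unknown versions. */
static long demo_kernel_size(const struct demo_hdr *hdr)
{
	switch (le32toh(hdr->version)) {
	case 1:
		return (long)sizeof(*hdr) + be32toh(hdr->v1.kernel_len);
	case 2:
	case 3:					/* v3 reuses the v2 layout */
		return (long)sizeof(*hdr) + be32toh(hdr->v2.kernel_len);
	default:
		return -1;
	}
}
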
Code example #7
File: xfs_ialloc.c  Project: 3null/fastsocket
/*
 * Allocate an inode on disk.
 * Mode is used to tell whether the new inode will need space, and whether
 * it is a directory.
 *
 * The arguments IO_agbp and alloc_done are defined to work within
 * the constraint of one allocation per transaction.
 * xfs_dialloc() is designed to be called twice if it has to do an
 * allocation to make more free inodes.  On the first call,
 * IO_agbp should be set to NULL. If an inode is available,
 * i.e., xfs_dialloc() did not need to do an allocation, an inode
 * number is returned.  In this case, IO_agbp would be set to the
 * current ag_buf and alloc_done set to false.
 * If an allocation needed to be done, xfs_dialloc would return
 * the current ag_buf in IO_agbp and set alloc_done to true.
 * The caller should then commit the current transaction, allocate a new
 * transaction, and call xfs_dialloc() again, passing in the previous
 * value of IO_agbp.  IO_agbp should be held across the transactions.
 * Since the agbp is locked across the two calls, the second call is
 * guaranteed to have a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the
 * on-disk data structures are updated.  The inode itself is not read
 * in, since doing so would break ordering constraints with xfs_reclaim.
 */
int
xfs_dialloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent inode (directory) */
	mode_t		mode,		/* mode bits for new inode */
	int		okalloc,	/* ok to allocate more space */
	xfs_buf_t	**IO_agbp,	/* in/out ag header's buffer */
	boolean_t	*alloc_done,	/* true if we needed to replenish
					   inode freelist */
	xfs_ino_t	*inop)		/* inode number allocated */
{
	xfs_agnumber_t	agcount;	/* number of allocation groups */
	xfs_buf_t	*agbp;		/* allocation group header's buffer */
	xfs_agnumber_t	agno;		/* allocation group number */
	xfs_agi_t	*agi;		/* allocation group header structure */
	xfs_btree_cur_t	*cur;		/* inode allocation btree cursor */
	int		error;		/* error return value */
	int		i;		/* result code */
	int		ialloced;	/* inode allocation status */
	int		noroom = 0;	/* no space for inode blk allocation */
	xfs_ino_t	ino;		/* fs-relative inode to be returned */
	/* REFERENCED */
	int		j;		/* result code */
	xfs_mount_t	*mp;		/* file system mount structure */
	int		offset;		/* index of inode in chunk */
	xfs_agino_t	pagino;		/* parent's AG relative inode # */
	xfs_agnumber_t	pagno;		/* parent's AG number */
	xfs_inobt_rec_incore_t rec;	/* inode allocation record */
	xfs_agnumber_t	tagno;		/* testing allocation group number */
	xfs_btree_cur_t	*tcur;		/* temp cursor */
	xfs_inobt_rec_incore_t trec;	/* temp inode allocation record */
	struct xfs_perag *pag;


	if (*IO_agbp == NULL) {
		/*
		 * We do not have an agbp, so select an initial allocation
		 * group for inode allocation.
		 */
		agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
		/*
		 * Couldn't find an allocation group satisfying the
		 * criteria, give up.
		 */
		if (!agbp) {
			*inop = NULLFSINO;
			return 0;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	} else {
		/*
		 * Continue where we left off before.  In this case, we
		 * know that the allocation group has free inodes.
		 */
		agbp = *IO_agbp;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
		ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
	}
	mp = tp->t_mountp;
	agcount = mp->m_sb.sb_agcount;
	agno = be32_to_cpu(agi->agi_seqno);
	tagno = agno;
	pagno = XFS_INO_TO_AGNO(mp, parent);
	pagino = XFS_INO_TO_AGINO(mp, parent);

	/*
	 * If we have already hit the ceiling of inode blocks then clear
	 * okalloc so we scan all available agi structures for a free
	 * inode.
	 */

	if (mp->m_maxicount &&
	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
		noroom = 1;
		okalloc = 0;
	}

	/*
	 * Loop until we find an allocation group that either has free inodes
	 * or in which we can allocate some inodes.  Iterate through the
	 * allocation groups upward, wrapping at the end.
	 */
	*alloc_done = B_FALSE;
	while (!agi->agi_freecount) {
		/*
		 * Don't do anything if we're not supposed to allocate
		 * any blocks, just go on to the next ag.
		 */
		if (okalloc) {
			/*
			 * Try to allocate some new inodes in the allocation
			 * group.
			 */
			if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
				xfs_trans_brelse(tp, agbp);
				if (error == ENOSPC) {
					*inop = NULLFSINO;
					return 0;
				} else
					return error;
			}
			if (ialloced) {
				/*
				 * We successfully allocated some inodes, return
				 * the current context to the caller so that it
				 * can commit the current transaction and call
				 * us again where we left off.
				 */
				ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
				*alloc_done = B_TRUE;
				*IO_agbp = agbp;
				*inop = NULLFSINO;
				return 0;
			}
		}
		/*
		 * If it failed, give up on this ag.
		 */
		xfs_trans_brelse(tp, agbp);
		/*
		 * Go on to the next ag: get its ag header.
		 */
nextag:
		if (++tagno == agcount)
			tagno = 0;
		if (tagno == agno) {
			*inop = NULLFSINO;
			return noroom ? ENOSPC : 0;
		}
		pag = xfs_perag_get(mp, tagno);
		if (pag->pagi_inodeok == 0) {
			xfs_perag_put(pag);
			goto nextag;
		}
		error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
		xfs_perag_put(pag);
		if (error)
			goto nextag;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	}
	/*
	 * Here with an allocation group that has a free inode.
	 * Reset agno since we may have chosen a new ag in the
	 * loop above.
	 */
	agno = tagno;
	*IO_agbp = NULL;
	pag = xfs_perag_get(mp, agno);

 restart_pagno:
	cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * If in the same AG as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
		int		doneleft;	/* done, to the left */
		int		doneright;	/* done, to the right */
		int		searchdistance = 10;

		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

		error = xfs_inobt_get_rec(cur, &rec, &j);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

		if (rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as the parent, done.
			 */
			goto alloc_inode;
		}


		/*
		 * In the same AG as parent, but parent's chunk is full.
		 */

		/* duplicate the cursor, search left & right simultaneously */
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;

		/*
		 * Skip to last blocks looked up if same parent inode.
		 */
		if (pagino != NULLAGINO &&
		    pag->pagl_pagino == pagino &&
		    pag->pagl_leftrec != NULLAGINO &&
		    pag->pagl_rightrec != NULLAGINO) {
			error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
						   &trec, &doneleft, 1);
			if (error)
				goto error1;

			error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
						   &rec, &doneright, 0);
			if (error)
				goto error1;
		} else {
			/* search left with tcur, back up 1 record */
			error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
			if (error)
				goto error1;

			/* search right with cur, go forward 1 record. */
			error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
			if (error)
				goto error1;
		}

		/*
		 * Loop until we find an inode chunk with a free inode.
		 */
		while (!doneleft || !doneright) {
			int	useleft;  /* using left inode chunk this time */

			if (!--searchdistance) {
				/*
				 * Not in range - save last search
				 * location and allocate a new inode
				 */
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto newino;
			}

			/* figure out the closer block if both are valid. */
			if (!doneleft && !doneright) {
				useleft = pagino -
				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
				  rec.ir_startino - pagino;
			} else {
				useleft = !doneleft;
			}

			/* free inodes to the left? */
			if (useleft && trec.ir_freecount) {
				rec = trec;
				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
				cur = tcur;

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* free inodes to the right? */
			if (!useleft && rec.ir_freecount) {
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* get next record to check */
			if (useleft) {
				error = xfs_ialloc_next_rec(tcur, &trec,
								 &doneleft, 1);
			} else {
				error = xfs_ialloc_next_rec(cur, &rec,
								 &doneright, 0);
			}
			if (error)
				goto error1;
		}

		/*
		 * We've reached the end of the btree.  Because we only
		 * search a small chunk of the btree on each pass, there
		 * are probably free inodes closer to the parent inode
		 * than where we are now.  Restart the search.
		 */
		pag->pagl_pagino = NULLAGINO;
		pag->pagl_leftrec = NULLAGINO;
		pag->pagl_rightrec = NULLAGINO;
		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		goto restart_pagno;
	}

	/*
	 * In a different AG from the parent.
	 * See if the most recently allocated block has any free.
	 */
newino:
	if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			goto error0;

		if (i == 1) {
			error = xfs_inobt_get_rec(cur, &rec, &j);
			if (error)
				goto error0;

			if (j == 1 && rec.ir_freecount > 0) {
				/*
				 * The last chunk allocated in the group
				 * still has a free inode.
				 */
				goto alloc_inode;
			}
		}
	}

	/*
	 * None left in the last group, search the whole AG
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

	for (;;) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if (rec.ir_freecount > 0)
			break;
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}

alloc_inode:
	offset = xfs_ialloc_find_free(&rec.ir_free);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	error = xfs_inobt_update(cur, &rec);
	if (error)
		goto error0;
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	xfs_perag_put(pag);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	xfs_perag_put(pag);
	return error;
}
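
One compact piece of logic above deserves unpacking: when both a left and a right neighbour record are valid, "closer" is measured from the last inode of the left chunk and from the first inode of the right chunk. A hedged sketch of just that decision, assuming the parent inode number lies between the two chunks (64 stands in for XFS_INODES_PER_CHUNK):

#include <stdbool.h>
#include <stdint.h>

#define INODES_PER_CHUNK 64u	/* stand-in for XFS_INODES_PER_CHUNK */

/* Precondition: left_start + INODES_PER_CHUNK - 1 <= parent_ino < right_start */
static bool use_left_chunk(uint32_t parent_ino,
			   uint32_t left_start, uint32_t right_start)
{
	/* distance from parent to the nearest inode of each chunk */
	uint32_t dleft  = parent_ino - (left_start + INODES_PER_CHUNK - 1);
	uint32_t dright = right_start - parent_ino;

	return dleft < dright;
}
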
Code example #8
File: irq.c  Project: 3null/linux
/**
 * of_irq_parse_raw - Low level interrupt tree parsing
 * @parent:	the device interrupt parent
 * @addr:	address specifier (start of "reg" property of the device) in be32 format
 * @out_irq:	structure of_irq updated by this function
 *
 * Returns 0 on success and a negative number on error
 *
 * This function is a low-level interrupt tree walking function. It
 * can be used to do a partial walk with synthesized reg and interrupts
 * properties, for example when resolving PCI interrupts when no device
 * node exists for the parent. It takes an interrupt specifier structure as
 * input, walks the tree looking for any interrupt-map properties, translates
 * the specifier for each map, and then returns the translated map.
 */
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
	int imaplen, match, i;

#ifdef DEBUG
	of_print_phandle_args("of_irq_parse_raw: ", out_irq);
#endif

	ipar = of_node_get(out_irq->np);

	/* First get the #interrupt-cells property of the current cursor
	 * that tells us how to interpret the passed-in intspec. If there
	 * is none, we are nice and just walk up the tree
	 */
	do {
		tmp = of_get_property(ipar, "#interrupt-cells", NULL);
		if (tmp != NULL) {
			intsize = be32_to_cpu(*tmp);
			break;
		}
		tnode = ipar;
		ipar = of_irq_find_parent(ipar);
		of_node_put(tnode);
	} while (ipar);
	if (ipar == NULL) {
		pr_debug(" -> no parent found !\n");
		goto fail;
	}

	pr_debug("of_irq_parse_raw: ipar=%s, size=%d\n", of_node_full_name(ipar), intsize);

	if (out_irq->args_count != intsize)
		return -EINVAL;

	/* Look for this #address-cells. We have to implement the old linux
	 * trick of looking for the parent here as some device-trees rely on it
	 */
	old = of_node_get(ipar);
	do {
		tmp = of_get_property(old, "#address-cells", NULL);
		tnode = of_get_parent(old);
		of_node_put(old);
		old = tnode;
	} while (old && tmp == NULL);
	of_node_put(old);
	old = NULL;
	addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp);

	pr_debug(" -> addrsize=%d\n", addrsize);

	/* Range check so that the temporary buffer doesn't overflow */
	if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS))
		goto fail;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < addrsize; i++)
		initial_match_array[i] = addr ? addr[i] : 0;
	for (i = 0; i < intsize; i++)
		initial_match_array[addrsize + i] = cpu_to_be32(out_irq->args[i]);

	/* Now start the actual "proper" walk of the interrupt tree */
	while (ipar != NULL) {
		/* Now check if cursor is an interrupt-controller and if it is
		 * then we are done
		 */
		if (of_get_property(ipar, "interrupt-controller", NULL) !=
				NULL) {
			pr_debug(" -> got it !\n");
			return 0;
		}

		/*
		 * interrupt-map parsing does not work without a reg
		 * property when #address-cells != 0
		 */
		if (addrsize && !addr) {
			pr_debug(" -> no reg passed in when needed !\n");
			goto fail;
		}

		/* Now look for an interrupt-map */
		imap = of_get_property(ipar, "interrupt-map", &imaplen);
		/* No interrupt map, check for an interrupt parent */
		if (imap == NULL) {
			pr_debug(" -> no map, getting parent\n");
			newpar = of_irq_find_parent(ipar);
			goto skiplevel;
		}
		imaplen /= sizeof(u32);

		/* Look for a mask */
		imask = of_get_property(ipar, "interrupt-map-mask", NULL);
		if (!imask)
			imask = dummy_imask;

		/* Parse interrupt-map */
		match = 0;
		while (imaplen > (addrsize + intsize + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < (addrsize + intsize); i++, imaplen--)
				match &= !((match_array[i] ^ *imap++) & imask[i]);

			pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);

			/* Get the interrupt parent */
			if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
				newpar = of_node_get(of_irq_dflt_pic);
			else
				newpar = of_find_node_by_phandle(be32_to_cpup(imap));
			imap++;
			--imaplen;

			/* Check if not found */
			if (newpar == NULL) {
				pr_debug(" -> imap parent not found !\n");
				goto fail;
			}

			if (!of_device_is_available(newpar))
				match = 0;

			/* Get #interrupt-cells and #address-cells of new
			 * parent
			 */
			tmp = of_get_property(newpar, "#interrupt-cells", NULL);
			if (tmp == NULL) {
				pr_debug(" -> parent lacks #interrupt-cells!\n");
				goto fail;
			}
			newintsize = be32_to_cpu(*tmp);
			tmp = of_get_property(newpar, "#address-cells", NULL);
			newaddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp);

			pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
			    newintsize, newaddrsize);

			/* Check for malformed properties */
			if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS))
				goto fail;
			if (imaplen < (newaddrsize + newintsize))
				goto fail;

			imap += newaddrsize + newintsize;
			imaplen -= newaddrsize + newintsize;

			pr_debug(" -> imaplen=%d\n", imaplen);
		}
		if (!match)
			goto fail;

		/*
		 * Successfully parsed an interrupt-map translation; copy new
		 * interrupt specifier into the out_irq structure
		 */
		out_irq->np = newpar;

		match_array = imap - newaddrsize - newintsize;
		for (i = 0; i < newintsize; i++)
			out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
		out_irq->args_count = intsize = newintsize;
		addrsize = newaddrsize;

	skiplevel:
		/* Iterate again with new parent */
		pr_debug(" -> new parent: %s\n", of_node_full_name(newpar));
		of_node_put(ipar);
		ipar = newpar;
		newpar = NULL;
	}
 fail:
	of_node_put(ipar);
	of_node_put(newpar);

	return -EINVAL;
}
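
The core of the interrupt-map walk is the masked comparison: a map entry matches when the unit's address-plus-interrupt specifier equals the entry in every bit position selected by interrupt-map-mask. A hedged sketch of that test with host-order cells (the kernel compares raw big-endian cells directly, which works the same way since the mask is in the same byte order):

#include <stdbool.h>
#include <stdint.h>

static bool imap_entry_matches(const uint32_t *spec, const uint32_t *entry,
			       const uint32_t *mask, unsigned int ncells)
{
	for (unsigned int i = 0; i < ncells; i++)
		if ((spec[i] ^ entry[i]) & mask[i])
			return false;	/* differs in an unmasked bit */
	return true;
}
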
Code example #9
static int emi62_load_firmware (struct usb_device *dev)
{
	const struct firmware *loader_fw = NULL;
	const struct firmware *bitstream_fw = NULL;
	const struct firmware *firmware_fw = NULL;
	const struct ihex_binrec *rec;
	int err;
	int i;
	__u32 addr;	/* write address for the current chunk */
	__u8 *buf;

	dev_dbg(&dev->dev, "load_firmware\n");
	buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL);
	if (!buf) {
		err( "%s - error loading firmware: error = %d", __func__, -ENOMEM);
		err = -ENOMEM;
		goto wraperr;
	}

	err = request_ihex_firmware(&loader_fw, "emi62/loader.fw", &dev->dev);
	if (err)
		goto nofw;

	err = request_ihex_firmware(&bitstream_fw, "emi62/bitstream.fw",
				    &dev->dev);
	if (err)
		goto nofw;

	err = request_ihex_firmware(&firmware_fw, FIRMWARE_FW, &dev->dev);
	if (err) {
	nofw:
		err( "%s - request_firmware() failed", __func__);
		goto wraperr;
	}

	/* Assert reset (stop the CPU in the EMI). */
	err = emi62_set_reset(dev,1);
	if (err < 0) {
		err("%s - error loading firmware: error = %d", __func__, err);
		goto wraperr;
	}

	rec = (const struct ihex_binrec *)loader_fw->data;

	/* 1. Load the FPGA loader into the EZ-USB's internal RAM. */
	while (rec) {
		err = emi62_writememory(dev, be32_to_cpu(rec->addr),
					rec->data, be16_to_cpu(rec->len),
					ANCHOR_LOAD_INTERNAL);
		if (err < 0) {
			err("%s - error loading firmware: error = %d", __func__, err);
			goto wraperr;
		}
		rec = ihex_next_binrec(rec);
	}

	/* De-assert reset (let the CPU run). */
	err = emi62_set_reset(dev,0);
	if (err < 0) {
		err("%s - error loading firmware: error = %d", __func__, err);
		goto wraperr;
	}
	msleep(250);	/* let the device settle */

	/* 2. Upload the FPGA bitstream.  Consecutive ihex records are
	 * collected into one buffer and sent with a single request,
	 * which is much faster than one transfer per record. */
	rec = (const struct ihex_binrec *)bitstream_fw->data;
	do {
		i = 0;
		addr = be32_to_cpu(rec->addr);

		/* pack consecutive records into the transfer buffer */
		while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) {
			memcpy(buf + i, rec->data, be16_to_cpu(rec->len));
			i += be16_to_cpu(rec->len);
			rec = ihex_next_binrec(rec);
		}
		err = emi62_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA);
		if (err < 0) {
			err("%s - error loading firmware: error = %d", __func__, err);
			goto wraperr;
		}
	} while (rec);

	/* Assert reset again (stop the CPU in the EMI). */
	err = emi62_set_reset(dev,1);
	if (err < 0) {
		err("%s - error loading firmware: error = %d", __func__, err);
		goto wraperr;
	}

	/* 3. Load the firmware loader into internal RAM again. */
	for (rec = (const struct ihex_binrec *)loader_fw->data;
	     rec; rec = ihex_next_binrec(rec)) {
		err = emi62_writememory(dev, be32_to_cpu(rec->addr),
					rec->data, be16_to_cpu(rec->len),
					ANCHOR_LOAD_INTERNAL);
		if (err < 0) {
			err("%s - error loading firmware: error = %d", __func__, err);
			goto wraperr;
		}
	}

	/* De-assert reset (let the CPU run). */
	err = emi62_set_reset(dev,0);
	if (err < 0) {
		err("%s - error loading firmware: error = %d", __func__, err);
		goto wraperr;
	}
	msleep(250);	/* let the device settle */

	/* 4. Upload the part of the firmware that lies in external RAM. */

	for (rec = (const struct ihex_binrec *)firmware_fw->data;
	     rec; rec = ihex_next_binrec(rec)) {
		if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) {
			err = emi62_writememory(dev, be32_to_cpu(rec->addr),
						rec->data, be16_to_cpu(rec->len),
						ANCHOR_LOAD_EXTERNAL);
			if (err < 0) {
				err("%s - error loading firmware: error = %d", __func__, err);
				goto wraperr;
			}
		}
	}

	/* Assert reset (stop the CPU in the EMI). */
	err = emi62_set_reset(dev,1);
	if (err < 0) {
		err("%s - error loading firmware: error = %d", __func__, err);
		goto wraperr;
	}

	for (rec = (const struct ihex_binrec *)firmware_fw->data;
	     rec; rec = ihex_next_binrec(rec)) {
		if (INTERNAL_RAM(be32_to_cpu(rec->addr))) {
			err = emi62_writememory(dev, be32_to_cpu(rec->addr),
						rec->data, be16_to_cpu(rec->len),
						ANCHOR_LOAD_EXTERNAL);
			if (err < 0) {
				err("%s - error loading firmware: error = %d", __func__, err);
				goto wraperr;
			}
		}
	}

	/* De-assert reset (let the CPU run). */
	err = emi62_set_reset(dev,0);
	if (err < 0) {
		err("%s - error loading firmware: error = %d", __func__, err);
		goto wraperr;
	}
	msleep(250);	/* let the device settle */

	release_firmware(loader_fw);
	release_firmware(bitstream_fw);
	release_firmware(firmware_fw);

	kfree(buf);

	/* Return 1 to fail this probe: after the firmware download the
	 * device re-enumerates and the real driver can bind to it. */
	return 1;

wraperr:
	release_firmware(loader_fw);
	release_firmware(bitstream_fw);
	release_firmware(firmware_fw);

	kfree(buf);
	dev_err(&dev->dev, "Error\n");
	return err;
}
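
The bitstream path above coalesces many tiny ihex records into one buffer per USB transfer, which is far faster than a transfer per record. A hedged sketch of that packing loop, with demo_rec standing in for struct ihex_binrec and 1023 for FW_LOAD_SIZE:

#include <stdint.h>
#include <string.h>

#define CHUNK 1023

struct demo_rec {
	uint32_t addr;
	uint16_t len;
	const uint8_t *data;
	const struct demo_rec *next;
};

/* Packs consecutive records into buf; advances *recp past what was
 * consumed and returns the number of bytes packed. */
static int pack_records(const struct demo_rec **recp, uint8_t *buf)
{
	const struct demo_rec *rec = *recp;
	int i = 0;

	while (rec && i + rec->len < CHUNK) {
		memcpy(buf + i, rec->data, rec->len);
		i += rec->len;
		rec = rec->next;
	}
	*recp = rec;
	return i;
}
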
Code example #10
File: xfs_dir2_block.c  Project: Addision/LVS
/*
 * Convert a V2 leaf directory to a V2 block directory if possible.
 */
int						/* error */
xfs_dir2_leaf_to_block(
	xfs_da_args_t		*args,		/* operation arguments */
	xfs_dabuf_t		*lbp,		/* leaf buffer */
	xfs_dabuf_t		*dbp)		/* data buffer */
{
	__be16			*bestsp;	/* leaf bests table */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_inode_t		*dp;		/* incore directory inode */
	xfs_dir2_data_unused_t	*dup;		/* unused data entry */
	int			error;		/* error return value */
	int			from;		/* leaf from index */
	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
	xfs_mount_t		*mp;		/* file system mount point */
	int			needlog;	/* need to log data header */
	int			needscan;	/* need to scan for bestfree */
	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
	int			size;		/* bytes used */
	__be16			*tagp;		/* end of entry (tag) */
	int			to;		/* block/leaf to index */
	xfs_trans_t		*tp;		/* transaction pointer */

	trace_xfs_dir2_leaf_to_block(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	leaf = lbp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
	/*
	 * If there are data blocks other than the first one, take this
	 * opportunity to remove trailing empty data blocks that may have
	 * been left behind during no-space-reservation operations.
	 * These will show up in the leaf bests table.
	 */
	while (dp->i_d.di_size > mp->m_dirblksize) {
		bestsp = xfs_dir2_leaf_bests_p(ltp);
		if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
		    mp->m_dirblksize - (uint)sizeof(block->hdr)) {
			if ((error =
			    xfs_dir2_leaf_trim_data(args, lbp,
				    (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
				goto out;
		} else {
			error = 0;
			goto out;
		}
	}
	/*
	 * Read the data block if we don't already have it, give up if it fails.
	 */
	if (dbp == NULL &&
	    (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp,
		    XFS_DATA_FORK))) {
		goto out;
	}
	block = dbp->data;
	ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC);
	/*
	 * Size of the "leaf" area in the block.
	 */
	size = (uint)sizeof(block->tail) +
	       (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
	/*
	 * Look at the last data entry.
	 */
	tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1;
	dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
	/*
	 * If it's not free or is too short we can't do it.
	 */
	if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG ||
	    be16_to_cpu(dup->length) < size) {
		error = 0;
		goto out;
	}
	/*
	 * Start converting it to block form.
	 */
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	needlog = 1;
	needscan = 0;
	/*
	 * Use up the space at the end of the block (blp/btp).
	 */
	xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size,
		&needlog, &needscan);
	/*
	 * Initialize the block tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
	btp->stale = 0;
	xfs_dir2_block_log_tail(tp, dbp);
	/*
	 * Initialize the block leaf area.  We compact out stale entries.
	 */
	lep = xfs_dir2_block_leaf_p(btp);
	for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
		if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
			continue;
		lep[to++] = leaf->ents[from];
	}
	ASSERT(to == be32_to_cpu(btp->count));
	xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1);
	/*
	 * Scan the bestfree if we need it and log the data block header.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, dbp);
	/*
	 * Pitch the old leaf block.
	 */
	error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp);
	lbp = NULL;
	if (error) {
		goto out;
	}
	/*
	 * Now see if the resulting block can be shrunken to shortform.
	 */
	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
	    XFS_IFORK_DSIZE(dp)) {
		error = 0;
		goto out;
	}
	return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
out:
	if (lbp)
		xfs_da_buf_done(lbp);
	if (dbp)
		xfs_da_buf_done(dbp);
	return error;
}
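
The compaction loop in the middle of this function is the interesting part: stale leaf entries are identified by a null address sentinel and skipped, while live entries are copied down in hash order. A hedged stand-alone sketch (the sentinel value here is invented; XFS uses XFS_DIR2_NULL_DATAPTR):

#include <stdint.h>

#define DEMO_NULL_ADDR 0xffffffffu	/* stand-in for XFS_DIR2_NULL_DATAPTR */

struct demo_leaf_ent {
	uint32_t hashval;
	uint32_t address;
};

/* Copies live entries from src to dst, dropping stale ones; returns the
 * live count, which becomes the block tail's entry count. */
static int compact_leaf(struct demo_leaf_ent *dst,
			const struct demo_leaf_ent *src, int count)
{
	int to = 0;

	for (int from = 0; from < count; from++) {
		if (src[from].address == DEMO_NULL_ADDR)
			continue;		/* stale: drop */
		dst[to++] = src[from];
	}
	return to;
}
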
Code example #11
File: irq.c  Project: 3null/linux
/**
 * of_irq_parse_one - Resolve an interrupt for a device
 * @device: the device whose interrupt is to be resolved
 * @index: index of the interrupt to resolve
 * @out_irq: structure of_irq filled by this function
 *
 * This function resolves an interrupt for a node by walking the interrupt tree,
 * finding which interrupt controller node it is attached to, and returning the
 * interrupt specifier that can be used to retrieve a Linux IRQ number.
 */
int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
{
	struct device_node *p;
	const __be32 *intspec, *tmp, *addr;
	u32 intsize, intlen;
	int i, res = -EINVAL;

	pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);

	/* OldWorld mac stuff is "special", handle out of line */
	if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
		return of_irq_parse_oldworld(device, index, out_irq);

	/* Get the reg property (if any) */
	addr = of_get_property(device, "reg", NULL);

	/* Try the new-style interrupts-extended first */
	res = of_parse_phandle_with_args(device, "interrupts-extended",
					"#interrupt-cells", index, out_irq);
	if (!res)
		return of_irq_parse_raw(addr, out_irq);

	/* Get the interrupts property */
	intspec = of_get_property(device, "interrupts", &intlen);
	if (intspec == NULL)
		return -EINVAL;

	intlen /= sizeof(*intspec);

	pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);

	/* Look for the interrupt parent. */
	p = of_irq_find_parent(device);
	if (p == NULL)
		return -EINVAL;

	/* Get size of interrupt specifier */
	tmp = of_get_property(p, "#interrupt-cells", NULL);
	if (tmp == NULL)
		goto out;
	intsize = be32_to_cpu(*tmp);

	pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);

	/* Check index */
	if ((index + 1) * intsize > intlen)
		goto out;

	/* Copy intspec into irq structure */
	intspec += index * intsize;
	out_irq->np = p;
	out_irq->args_count = intsize;
	for (i = 0; i < intsize; i++)
		out_irq->args[i] = be32_to_cpup(intspec++);

	/* Check if there are any interrupt-map translations to process */
	res = of_irq_parse_raw(addr, out_irq);
 out:
	of_node_put(p);
	return res;
}
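
The index arithmetic above is the whole story for the flat "interrupts" property: the property is an array of 32-bit cells, each specifier occupies intsize cells, and entry index starts at cell index * intsize. A hedged sketch of that bounds-checked lookup (nth_intspec is a hypothetical helper):

#include <stdint.h>

/* Returns a pointer to the index-th specifier inside cells[ncells], or
 * NULL when the property is too short; intsize is #interrupt-cells. */
static const uint32_t *nth_intspec(const uint32_t *cells, uint32_t ncells,
				   uint32_t intsize, uint32_t index)
{
	if (intsize == 0 || (index + 1) * intsize > ncells)
		return NULL;
	return cells + index * intsize;
}
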
Code example #12
File: xfs_dir2_block.c  Project: Addision/LVS
/*
 * Remove an entry from a block format directory.
 * If that makes the block small enough to fit in shortform, transform it.
 */
int						/* error */
xfs_dir2_block_removename(
	xfs_da_args_t		*args)		/* directory operation args */
{
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf pointer */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* incore inode */
	int			ent;		/* block leaf entry index */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to fixup bestfree */
	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
	int			size;		/* shortform size */
	xfs_trans_t		*tp;		/* transaction pointer */

	trace_xfs_dir2_block_removename(args);

	/*
	 * Look up the entry in the block.  Gets the buffer and entry index.
	 * It will always be there, the vnodeops level does a lookup first.
	 */
	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) {
		return error;
	}
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	block = bp->data;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * Point to the data entry using the leaf entry.
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
	/*
	 * Mark the data entry's space free.
	 */
	needlog = needscan = 0;
	xfs_dir2_data_make_free(tp, bp,
		(xfs_dir2_data_aoff_t)((char *)dep - (char *)block),
		xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
	/*
	 * Fix up the block tail.
	 */
	be32_add_cpu(&btp->stale, 1);
	xfs_dir2_block_log_tail(tp, bp);
	/*
	 * Remove the leaf entry by marking it stale.
	 */
	blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
	xfs_dir2_block_log_leaf(tp, bp, ent, ent);
	/*
	 * Fix up bestfree, log the header if necessary.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_data_check(dp, bp);
	/*
	 * See if the size as a shortform is good enough.
	 */
	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
	    XFS_IFORK_DSIZE(dp)) {
		xfs_da_buf_done(bp);
		return 0;
	}
	/*
	 * If it works, do the conversion.
	 */
	return xfs_dir2_block_to_sf(args, bp, size, &sfh);
}
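
Note how removal never shifts the sorted leaf array: the slot's address is overwritten with the null sentinel and btp->stale is bumped, leaving compaction to a later pass (such as the one in xfs_dir2_leaf_to_block above). A hedged one-screen sketch of that mark-stale idiom, with invented types:

#include <stdint.h>

#define DEMO_NULL_ADDR 0xffffffffu	/* stand-in for XFS_DIR2_NULL_DATAPTR */

struct demo_tail {
	uint32_t count;			/* total leaf slots */
	uint32_t stale;			/* slots marked dead */
};

static void mark_stale(uint32_t *address, struct demo_tail *tail, int ent)
{
	address[ent] = DEMO_NULL_ADDR;	/* keep the slot, kill the entry */
	tail->stale++;			/* compacted out later */
}
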
Code example #13
File: xfs_dir2_block.c  Project: Addision/LVS
/*
 * Add an entry to a block directory.
 */
int						/* error */
xfs_dir2_block_addname(
	xfs_da_args_t		*args)		/* directory op arguments */
{
	xfs_dir2_data_free_t	*bf;		/* bestfree table in block */
	xfs_dir2_block_t	*block;		/* directory block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	int			compact;	/* need to compact leaf ents */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* directory inode */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	int			error;		/* error return value */
	xfs_dir2_data_unused_t	*enddup=NULL;	/* unused at end of data */
	xfs_dahash_t		hash;		/* hash value of found entry */
	int			high;		/* high index for binary srch */
	int			highstale;	/* high stale index */
	int			lfloghigh=0;	/* last final leaf to log */
	int			lfloglow=0;	/* first final leaf to log */
	int			len;		/* length of the new entry */
	int			low;		/* low index for binary srch */
	int			lowstale;	/* low stale index */
	int			mid=0;		/* midpoint for binary srch */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log header */
	int			needscan;	/* need to rescan freespace */
	__be16			*tagp;		/* pointer to tag value */
	xfs_trans_t		*tp;		/* transaction structure */

	trace_xfs_dir2_block_addname(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	/*
	 * Read the (one and only) directory block into dabuf bp.
	 */
	if ((error =
	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
		return error;
	}
	ASSERT(bp != NULL);
	block = bp->data;
	/*
	 * Check the magic number, corrupted if wrong.
	 */
	if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
		XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
				     XFS_ERRLEVEL_LOW, mp, block);
		xfs_da_brelse(tp, bp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	len = xfs_dir2_data_entsize(args->namelen);
	/*
	 * Set up pointers to parts of the block.
	 */
	bf = block->hdr.bestfree;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * No stale entries?  Need space for entry and new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then can't do this add without cleaning up:
		 * the space before the first leaf entry needs to be free so it
		 * can be expanded to hold the pointer to the new entry.
		 */
		if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG)
			dup = enddup = NULL;
		/*
		 * Check out the biggest freespace and see if it's the same one.
		 */
		else {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + be16_to_cpu(bf[0].offset));
			if (dup == enddup) {
				/*
				 * It is the biggest freespace, is it too small
				 * to hold the new leaf too?
				 */
				if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) {
					/*
					 * Yes, we use the second-largest
					 * entry instead if it works.
					 */
					if (be16_to_cpu(bf[1].length) >= len)
						dup = (xfs_dir2_data_unused_t *)
						      ((char *)block +
						       be16_to_cpu(bf[1].offset));
					else
						dup = NULL;
				}
			} else {
				/*
				 * Not the same free entry,
				 * just check its length.
				 */
				if (be16_to_cpu(dup->length) < len) {
					dup = NULL;
				}
			}
		}
		compact = 0;
	}
	/*
	 * If there are stale entries we'll use one for the leaf.
	 * Is the biggest entry enough to avoid compaction?
	 */
	else if (be16_to_cpu(bf[0].length) >= len) {
		dup = (xfs_dir2_data_unused_t *)
		      ((char *)block + be16_to_cpu(bf[0].offset));
		compact = 0;
	}
	/*
	 * Will need to compact to make this work.
	 */
	else {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then the data will go where the
		 * leaf data starts now, if it works at all.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) *
			    (uint)sizeof(*blp) < len)
				dup = NULL;
		} else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len)
			dup = NULL;
		else
			dup = (xfs_dir2_data_unused_t *)blp;
		compact = 1;
	}
	/*
	 * If this isn't a real add, we're done with the buffer.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		xfs_da_brelse(tp, bp);
	/*
	 * If we don't have space for the new entry & leaf ...
	 */
	if (!dup) {
		/*
		 * Not trying to actually do anything, or don't have
		 * a space reservation: return no-space.
		 */
		if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
			return XFS_ERROR(ENOSPC);
		/*
		 * Convert to the next larger format.
		 * Then add the new entry in that format.
		 */
		error = xfs_dir2_block_to_leaf(args, bp);
		xfs_da_buf_done(bp);
		if (error)
			return error;
		return xfs_dir2_leaf_addname(args);
	}
	/*
	 * Just checking, and it would work, so say so.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		return 0;
	needlog = needscan = 0;
	/*
	 * If need to compact the leaf entries, do it now.
	 * Leave the highest-numbered stale entry stale.
	 * XXX should be the one closest to mid but mid is not yet computed.
	 */
	if (compact) {
		int	fromidx;		/* source leaf index */
		int	toidx;			/* target leaf index */

		for (fromidx = toidx = be32_to_cpu(btp->count) - 1,
			highstale = lfloghigh = -1;
		     fromidx >= 0;
		     fromidx--) {
			if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
				if (highstale == -1)
					highstale = toidx;
				else {
					if (lfloghigh == -1)
						lfloghigh = toidx;
					continue;
				}
			}
			if (fromidx < toidx)
				blp[toidx] = blp[fromidx];
			toidx--;
		}
		lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
		lfloghigh -= be32_to_cpu(btp->stale) - 1;
		be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
		xfs_dir2_data_make_free(tp, bp,
			(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
			(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
			&needlog, &needscan);
		blp += be32_to_cpu(btp->stale) - 1;
		btp->stale = cpu_to_be32(1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
			needscan = 0;
		}
	}
	/*
	 * Set leaf logging boundaries to impossible state.
	 * For the no-stale case they're set explicitly.
	 */
	else if (btp->stale) {
		lfloglow = be32_to_cpu(btp->count);
		lfloghigh = -1;
	}
	/*
	 * Find the slot that's first lower than our hash value, -1 if none.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) {
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
	}
	while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) {
		mid--;
	}
	/*
	 * No stale entries, will use enddup space to hold new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Mark the space needed for the new leaf entry, now in use.
		 */
		xfs_dir2_data_use_free(tp, bp, enddup,
			(xfs_dir2_data_aoff_t)
			((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
			 sizeof(*blp)),
			(xfs_dir2_data_aoff_t)sizeof(*blp),
			&needlog, &needscan);
		/*
		 * Update the tail (entry count).
		 */
		be32_add_cpu(&btp->count, 1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
				&needlog);
			needscan = 0;
		}
		/*
		 * Adjust pointer to the first leaf entry, we're about to move
		 * the table up one to open up space for the new leaf entry.
		 * Then adjust our index to match.
		 */
		blp--;
		mid++;
		if (mid)
			memmove(blp, &blp[1], mid * sizeof(*blp));
		lfloglow = 0;
		lfloghigh = mid;
	}
	/*
	 * Use a stale leaf for our new entry.
	 */
	else {
		for (lowstale = mid;
		     lowstale >= 0 &&
			be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
		     lowstale--)
			continue;
		for (highstale = mid + 1;
		     highstale < be32_to_cpu(btp->count) &&
			be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
			(lowstale < 0 || mid - lowstale > highstale - mid);
		     highstale++)
			continue;
		/*
		 * Move entries toward the low-numbered stale entry.
		 */
		if (lowstale >= 0 &&
		    (highstale == be32_to_cpu(btp->count) ||
		     mid - lowstale <= highstale - mid)) {
			if (mid - lowstale)
				memmove(&blp[lowstale], &blp[lowstale + 1],
					(mid - lowstale) * sizeof(*blp));
			lfloglow = MIN(lowstale, lfloglow);
			lfloghigh = MAX(mid, lfloghigh);
		}
		/*
		 * Move entries toward the high-numbered stale entry.
		 */
		else {
			ASSERT(highstale < be32_to_cpu(btp->count));
			mid++;
			if (highstale - mid)
				memmove(&blp[mid + 1], &blp[mid],
					(highstale - mid) * sizeof(*blp));
			lfloglow = MIN(mid, lfloglow);
			lfloghigh = MAX(highstale, lfloghigh);
		}
		be32_add_cpu(&btp->stale, -1);
	}
	/*
	 * Point to the new data entry.
	 */
	dep = (xfs_dir2_data_entry_t *)dup;
	/*
	 * Fill in the leaf entry.
	 */
	blp[mid].hashval = cpu_to_be32(args->hashval);
	blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
	/*
	 * Mark space for the data entry used.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
	/*
	 * Create the new data entry.
	 */
	dep->inumber = cpu_to_be64(args->inumber);
	dep->namelen = args->namelen;
	memcpy(dep->name, args->name, args->namelen);
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	/*
	 * Clean up the bestfree array and log the header, tail, and entry.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_log_entry(tp, bp, dep);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
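
A note on the binary search above: after the low/high loop ends, mid is walked backwards past every entry whose hash is greater than or equal to args->hashval, so the new leaf always lands before the whole run of equal hashes. A standalone sketch of that search-then-back-up idiom (plain arrays and invented names, not the XFS types):

#include <stdint.h>

/* Return the index at which a new entry with hash 'target' should be
 * inserted so that equal hashes stay contiguous -- the same pattern as
 * the binary search plus back-up loop in xfs_dir2_block_addname(). */
static int find_insert_slot(const uint32_t *hashes, int count, uint32_t target)
{
	int low = 0, high = count - 1, mid = 0;
	uint32_t hash;

	if (count <= 0)
		return 0;
	while (low <= high) {
		mid = (low + high) >> 1;
		hash = hashes[mid];
		if (hash == target)
			break;
		if (hash < target)
			low = mid + 1;
		else
			high = mid - 1;
	}
	/* Back up over the run of entries with hash >= target. */
	while (mid >= 0 && hashes[mid] >= target)
		mid--;
	return mid + 1;		/* first slot with hash >= target */
}
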
Code Example #14
File: xfs_dir2_block.c Project: Addision/LVS
/*
 * Internal block lookup routine.
 */
static int					/* error */
xfs_dir2_block_lookup_int(
	xfs_da_args_t		*args,		/* dir lookup arguments */
	xfs_dabuf_t		**bpp,		/* returned block buffer */
	int			*entno)		/* returned entry number */
{
	xfs_dir2_dataptr_t	addr;		/* data entry address */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* incore inode */
	int			error;		/* error return value */
	xfs_dahash_t		hash;		/* found hash value */
	int			high;		/* binary search high index */
	int			low;		/* binary search low index */
	int			mid;		/* binary search current idx */
	xfs_mount_t		*mp;		/* filesystem mount point */
	xfs_trans_t		*tp;		/* transaction pointer */
	enum xfs_dacmp		cmp;		/* comparison result */

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	/*
	 * Read the buffer, return error if we can't get it.
	 */
	if ((error =
	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
		return error;
	}
	ASSERT(bp != NULL);
	block = bp->data;
	xfs_dir2_data_check(dp, bp);
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * Loop doing a binary search for our hash value.
	 * Find our entry, ENOENT if it's not there.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; ; ) {
		ASSERT(low <= high);
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
		if (low > high) {
			ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
			xfs_da_brelse(tp, bp);
			return XFS_ERROR(ENOENT);
		}
	}
	/*
	 * Back up to the first one with the right hash value.
	 */
	while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) {
		mid--;
	}
	/*
	 * Now loop forward through all the entries with the
	 * right hash value looking for our name.
	 */
	do {
		if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR)
			continue;
		/*
		 * Get pointer to the entry from the leaf.
		 */
		dep = (xfs_dir2_data_entry_t *)
			((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
		/*
		 * Compare name and if it's an exact match, return the index
		 * and buffer. If it's the first case-insensitive match, store
		 * the index and buffer and continue looking for an exact match.
		 */
		cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
		if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
			args->cmpresult = cmp;
			*bpp = bp;
			*entno = mid;
			if (cmp == XFS_CMP_EXACT)
				return 0;
		}
	} while (++mid < be32_to_cpu(btp->count) &&
			be32_to_cpu(blp[mid].hashval) == hash);

	ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
	/*
	 * Here, we can only be doing a lookup (not a rename or replace).
	 * If a case-insensitive match was found earlier, return success.
	 */
	if (args->cmpresult == XFS_CMP_CASE)
		return 0;
	/*
	 * No match, release the buffer and return ENOENT.
	 */
	xfs_da_brelse(tp, bp);
	return XFS_ERROR(ENOENT);
}
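
Because many names can hash to the same value, the lookup backs up to the first leaf entry carrying the matching hash and then scans forward through the whole run, skipping stale (null-address) entries and comparing actual names. A minimal sketch of that collision scan (invented types and a plain strcmp(); the real code compares through mp->m_dirnameops):

#include <stdint.h>
#include <string.h>

#define NULL_PTR 0xffffffffu	/* stand-in for XFS_DIR2_NULL_DATAPTR */

struct leaf { uint32_t hashval; uint32_t address; };

/* Find an exact name match inside the run of equal-hash leaf entries.
 * names[] stands in for the data entries the addresses point at. */
static int scan_hash_run(const struct leaf *blp, int count, int mid,
			 uint32_t hash, const char *names[], const char *want)
{
	while (mid > 0 && blp[mid - 1].hashval == hash)
		mid--;			/* back up to the first equal hash */
	do {
		if (blp[mid].address == NULL_PTR)
			continue;	/* stale entry: keep scanning */
		if (strcmp(names[blp[mid].address], want) == 0)
			return mid;	/* exact match */
	} while (++mid < count && blp[mid].hashval == hash);
	return -1;			/* not found: ENOENT */
}
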
Code Example #15
File: xfs_qm.c Project: DenisLug/mptcp
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}
Code Example #16
File: transport.c Project: guribe94/linux
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
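
Here len comes from in_buf->smb_buf_length, a big-endian 32-bit field at the very start of the frame, and the reply size is recovered the same way through get_rfc1002_length(). A userspace sketch of reading such a 4-byte big-endian length prefix (hypothetical helper, not the cifs one; ntohl() is used in place of the kernel's be32_to_cpu()):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl(): network order is big-endian */

/* Return the payload length encoded in a 4-byte big-endian prefix.
 * The full frame on the wire is this prefix plus 'length' bytes,
 * which is why the code above copies *pbytes_returned + 4 bytes. */
static uint32_t frame_payload_len(const unsigned char *hdr)
{
	uint32_t be;

	memcpy(&be, hdr, sizeof(be));	/* avoid unaligned access */
	return ntohl(be);
}
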
Code Example #17
File: xfs_qm.c Project: DenisLug/mptcp
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
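
The walk batches its radix-tree lookups under qi_tree_lock, resumes from the id after the highest one seen, and restarts the whole pass after a short delay whenever a dquot reported -EAGAIN. The retry half of that idiom, reduced to a flat array (invented signature, not the XFS one):

#include <errno.h>
#include <stddef.h>

/* Apply execute() to every item; items reporting -EAGAIN are revisited
 * on a fresh pass, the way xfs_qm_dqpurge() failures make
 * xfs_qm_dquot_walk() restart after delay(1). */
static int walk_with_retry(const int *items, size_t n,
			   int (*execute)(int item))
{
	int last_error = 0;
	size_t skipped;

	do {
		size_t i;

		skipped = 0;
		for (i = 0; i < n; i++) {
			int error = execute(items[i]);

			if (error == -EAGAIN) {
				skipped++;	/* busy: retry next pass */
				continue;
			}
			if (error)
				last_error = error;
		}
		/* a real implementation would sleep briefly here */
	} while (skipped);
	return last_error;
}
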
Code Example #18
File: transport.c Project: guribe94/linux
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);
			/* Update # of requests on wire to server */
			add_credits(ses->server, 1, optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
	}

	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
			smb311_update_preauth_hash(ses, rqst[i].rq_iov,
						   rqst[i].rq_nvec);

		if (timeout == CIFS_ASYNC_OP)
			goto out;

		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
				 midQ[i]->mid);
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				add_credits(ses->server, 1, optype);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			add_credits(ses->server, 1, optype);
			return rc;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
			struct kvec iov = {
				.iov_base = resp_iov[i].iov_base,
				.iov_len = resp_iov[i].iov_len
			};
			smb311_update_preauth_hash(ses, &iov, 1);
		}

		credits = ses->server->ops->get_credits(midQ[i]);

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}
out:
	for (i = 0; i < num_rqst; i++)
		cifs_delete_mid(midQ[i]);
	add_credits(ses->server, credits, optype);

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
Code Example #19
File: xfs_ialloc.c Project: 3null/fastsocket
/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	xfs_agnumber_t	agno;
	int		error;
	int		i;
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */
	struct xfs_perag *pag;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = XFS_IALLOC_INODES(args.mp);
	if (args.mp->m_maxicount &&
	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
		return XFS_ERROR(ENOSPC);
	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
 	 */
	agi = XFS_BUF_TO_AGI(agbp);
	newino = be32_to_cpu(agi->agi_newino);
	agno = be32_to_cpu(agi->agi_seqno);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
			XFS_IALLOC_BLOCKS(args.mp);
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling
		 * a dirty transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1,
		 * however we need to take cluster alignment into account when
		 * fixing up the freelist. Use the minalignslop field to
		 * indicate that extra blocks might be required for alignment,
		 * but not to use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = args.mp->m_in_maxlevels - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	} else
		args.fsbno = NULLFSBLOCK;

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (args.mp->m_sinoalign) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = xfs_ialloc_cluster_alignment(&args);
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = args.mp->m_in_maxlevels - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.alignment = xfs_ialloc_cluster_alignment(&args);
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);

	/*
	 * Stamp and write the inode buffers.
	 *
	 * Seed the new inode cluster with a random generation number. This
	 * prevents short-term reuse of generation numbers if a chunk is
	 * freed and then immediately reallocated. We use random numbers
	 * rather than a linear progression to prevent the next generation
	 * number from being easily guessable.
	 */
	error = xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno,
			args.len, random32());

	if (error)
		return error;
	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	be32_add_cpu(&agi->agi_count, newlen);
	be32_add_cpu(&agi->agi_freecount, newlen);
	pag = xfs_perag_get(args.mp, agno);
	pag->pagi_freecount += newlen;
	xfs_perag_put(pag);
	agi->agi_newino = cpu_to_be32(newino);

	/*
	 * Insert records describing the new inode chunk into the btree.
	 */
	cur = xfs_inobt_init_cursor(args.mp, tp, agbp, agno);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		cur->bc_rec.i.ir_startino = thisino;
		cur->bc_rec.i.ir_freecount = XFS_INODES_PER_CHUNK;
		cur->bc_rec.i.ir_free = XFS_INOBT_ALL_FREE;
		error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		error = xfs_btree_insert(cur, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}
Code Example #20
File: sun.c Project: Antonio-Zhou/Linux-2.6.11
int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
{
	int i;
	__be16 csum;
	int slot = 1;
	__be16 *ush;
	Sector sect;
	struct sun_disklabel {
		unsigned char info[128];   /* Informative text string */
		unsigned char spare0[14];
		struct sun_info {
			unsigned char spare1;
			unsigned char id;
			unsigned char spare2;
			unsigned char flags;
		} infos[8];
		unsigned char spare[246];  /* Boot information etc. */
		__be16 rspeed;     /* Disk rotational speed */
		__be16 pcylcount;  /* Physical cylinder count */
		__be16 sparecyl;   /* extra sects per cylinder */
		unsigned char spare2[4];   /* More magic... */
		__be16 ilfact;     /* Interleave factor */
		__be16 ncyl;       /* Data cylinder count */
		__be16 nacyl;      /* Alt. cylinder count */
		__be16 ntrks;      /* Tracks per cylinder */
		__be16 nsect;      /* Sectors per track */
		unsigned char spare3[4];   /* Even more magic... */
		struct sun_partition {
			__be32 start_cylinder;
			__be32 num_sectors;
		} partitions[8];
		__be16 magic;      /* Magic number */
		__be16 csum;       /* Label xor'd checksum */
	} * label;		
	struct sun_partition *p;
	unsigned long spc;
	char b[BDEVNAME_SIZE];

	label = (struct sun_disklabel *)read_dev_sector(bdev, 0, &sect);
	if (!label)
		return -1;

	p = label->partitions;
	if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
/*		printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
		       bdevname(bdev, b), be16_to_cpu(label->magic)); */
		put_dev_sector(sect);
		return 0;
	}
	/* Look at the checksum */
	ush = ((__be16 *) (label+1)) - 1;
	for (csum = 0; ush >= ((__be16 *) label);)
		csum ^= *ush--;
	if (csum) {
		printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
		       bdevname(bdev, b));
		put_dev_sector(sect);
		return 0;
	}

	/* All Sun disks have 8 partition entries */
	spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
	for (i = 0; i < 8; i++, p++) {
		unsigned long st_sector;
		int num_sectors;

		st_sector = be32_to_cpu(p->start_cylinder) * spc;
		num_sectors = be32_to_cpu(p->num_sectors);
		if (num_sectors) {
			put_partition(state, slot, st_sector, num_sectors);
			if (label->infos[i].id == LINUX_RAID_PARTITION)
				state->parts[slot].flags = 1;
		}
		slot++;
	}
	printk("\n");
	put_dev_sector(sect);
	return 1;
}
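
The label checksum is chosen so that XOR-ing every 16-bit word of the sector, the stored csum field included, yields zero; the loop above just walks the words backwards and checks for a nonzero result. A standalone sketch of generating and verifying such a checksum (hypothetical buffer, not the Sun label struct):

#include <stdint.h>
#include <stddef.h>

/* XOR of all 16-bit words in a label.  Writing this value into the
 * label's csum slot makes the XOR of the whole label zero, which is
 * exactly the property sun_partition() verifies.  XOR commutes with
 * byte swapping for a compare-against-zero check, so the words can
 * stay big-endian. */
static uint16_t label_xor(const uint16_t *words, size_t nwords)
{
	uint16_t csum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		csum ^= words[i];
	return csum;	/* 0 == label intact (or freshly sealed) */
}
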
Code Example #21
File: tpm-interface.c Project: AK101111/linux
/*
 * Internal kernel interface to transmit TPM commands
 */
ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
		     size_t bufsiz)
{
	ssize_t rc;
	u32 count, ordinal;
	unsigned long stop;

	if (bufsiz > TPM_BUFSIZE)
		bufsiz = TPM_BUFSIZE;

	count = be32_to_cpu(*((__be32 *) (buf + 2)));
	ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
	if (count == 0)
		return -ENODATA;
	if (count > bufsiz) {
		dev_err(&chip->dev,
			"invalid count value %x %zx\n", count, bufsiz);
		return -E2BIG;
	}

	mutex_lock(&chip->tpm_mutex);

	rc = chip->ops->send(chip, (u8 *) buf, count);
	if (rc < 0) {
		dev_err(&chip->dev,
			"tpm_transmit: tpm_send: error %zd\n", rc);
		goto out;
	}

	if (chip->flags & TPM_CHIP_FLAG_IRQ)
		goto out_recv;

	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		stop = jiffies + tpm2_calc_ordinal_duration(chip, ordinal);
	else
		stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
	do {
		u8 status = chip->ops->status(chip);
		if ((status & chip->ops->req_complete_mask) ==
		    chip->ops->req_complete_val)
			goto out_recv;

		if (chip->ops->req_canceled(chip, status)) {
			dev_err(&chip->dev, "Operation Canceled\n");
			rc = -ECANCELED;
			goto out;
		}

		msleep(TPM_TIMEOUT);	/* CHECK */
		rmb();
	} while (time_before(jiffies, stop));

	chip->ops->cancel(chip);
	dev_err(&chip->dev, "Operation Timed out\n");
	rc = -ETIME;
	goto out;

out_recv:
	rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
	if (rc < 0)
		dev_err(&chip->dev,
			"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
	mutex_unlock(&chip->tpm_mutex);
	return rc;
}
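
The receive path polls the chip's status register until the completion bits match, the command is reported canceled, or a per-ordinal deadline passes. The same deadline-polling skeleton in portable C, using a monotonic clock where the driver uses jiffies (all names invented):

#include <stdbool.h>
#include <time.h>

/* Poll check() until it succeeds or timeout_ms elapses, napping ~5 ms
 * between probes -- the shape of tpm_transmit()'s wait loop. */
static int poll_until(bool (*check)(void), unsigned int timeout_ms)
{
	struct timespec start, now, nap = { 0, 5 * 1000 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (check())
			return 0;	/* request complete */
		nanosleep(&nap, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >=
		    (long)timeout_ms)
			return -1;	/* timed out: caller cancels */
	}
}
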
Code Example #22
static void handle_adsp_rtos_mtoa_app(struct rpc_request_hdr *req)
{
	struct rpc_adsp_rtos_modem_to_app_args_t *args =
		(struct rpc_adsp_rtos_modem_to_app_args_t *)req;
	uint32_t event;
	uint32_t proc_id;
#if defined(CONFIG_ARCH_MSM7227)
#else
	uint32_t desc_field;
#endif
	uint32_t module_id;
	uint32_t image;
	struct msm_adsp_module *module;
	struct adsp_rtos_mp_mtoa_type	*pkt_ptr;
	struct queue_to_offset_type	*qptr;
	struct queue_to_offset_type	*qtbl;
#if defined(CONFIG_ARCH_MSM7227)
	struct mod_to_queue_offsets	*mqptr;
	struct mod_to_queue_offsets	*mqtbl;
#endif
	uint32_t	*mptr;
	uint32_t	*mtbl;
	uint32_t	q_idx;
	uint32_t	num_entries;
	uint32_t	entries_per_image;
	struct adsp_rtos_mp_mtoa_init_info_type *iptr;
	struct adsp_rtos_mp_mtoa_init_info_type	*sptr;
	int32_t		i_no, e_idx;

	event = be32_to_cpu(args->mtoa_pkt.mp_mtoa_header.event);
	proc_id = be32_to_cpu(args->mtoa_pkt.mp_mtoa_header.proc_id);
#if defined(CONFIG_ARCH_MSM7227)
#else
	desc_field = be32_to_cpu(args->mtoa_pkt.desc_field);
#endif


#if defined(CONFIG_ARCH_MSM7227)
	if (event == RPC_ADSP_RTOS_INIT_INFO) {
#else
	if (desc_field == RPC_ADSP_RTOS_INIT_INFO) {
#endif
		pr_info("adsp:INIT_INFO Event\n");
		sptr = &args->mtoa_pkt.adsp_rtos_mp_mtoa_data.
				mp_mtoa_init_packet;

		iptr = adsp_info.init_info_ptr;
		iptr->image_count = be32_to_cpu(sptr->image_count);
		iptr->num_queue_offsets = be32_to_cpu(sptr->num_queue_offsets);
		num_entries = iptr->num_queue_offsets;
		qptr = &sptr->queue_offsets_tbl[0][0];
		for (i_no = 0; i_no < iptr->image_count; i_no++) {
			qtbl = &iptr->queue_offsets_tbl[i_no][0];
			for (e_idx = 0; e_idx < num_entries; e_idx++) {
				qtbl[e_idx].offset = be32_to_cpu(qptr->offset);
				qtbl[e_idx].queue = be32_to_cpu(qptr->queue);
				q_idx = be32_to_cpu(qptr->queue);
				iptr->queue_offsets[i_no][q_idx] =
							qtbl[e_idx].offset;
#if 0
				pr_info("iptr->queue_offsets[%d][%d] = %x\n",
				i_no, q_idx, iptr->queue_offsets[i_no][q_idx]);
#endif
				qptr++;
			}
		}

		num_entries = be32_to_cpu(sptr->num_task_module_entries);
		iptr->num_task_module_entries = num_entries;
		entries_per_image = num_entries / iptr->image_count;
		mptr = &sptr->task_to_module_tbl[0][0];
		for (i_no = 0; i_no < iptr->image_count; i_no++) {
			mtbl = &iptr->task_to_module_tbl[i_no][0];
			for (e_idx = 0; e_idx < entries_per_image; e_idx++) {
				mtbl[e_idx] = be32_to_cpu(*mptr);
				mptr++;
#if 0
				pr_info("mtbl[%d] = %x\n", e_idx, mtbl[e_idx]);
#endif
			}
		}

		iptr->module_table_size = be32_to_cpu(sptr->module_table_size);
		mptr = &sptr->module_entries[0];
		for (i_no = 0; i_no < iptr->module_table_size; i_no++)
			iptr->module_entries[i_no] = be32_to_cpu(mptr[i_no]);

#if defined(CONFIG_ARCH_MSM7227)
		mqptr = &sptr->mod_to_q_tbl[0];
		mqtbl = &iptr->mod_to_q_tbl[0];
		iptr->mod_to_q_entries = be32_to_cpu(sptr->mod_to_q_entries);
		for (e_idx = 0; e_idx < iptr->mod_to_q_entries; e_idx++) {
			mqtbl[e_idx].module = be32_to_cpu(mqptr->module);
			mqtbl[e_idx].q_type = be32_to_cpu(mqptr->q_type);
			mqtbl[e_idx].q_max_len = be32_to_cpu(mqptr->q_max_len);
			mqptr++;
		}
#endif

		adsp_info.init_info_state = ADSP_STATE_INIT_INFO;
		wake_up(&adsp_info.init_info_wait);
		rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid,
						RPC_ACCEPTSTAT_SUCCESS);

		return;
	}

	pkt_ptr = &args->mtoa_pkt.adsp_rtos_mp_mtoa_data.mp_mtoa_packet;
	module_id = be32_to_cpu(pkt_ptr->module);
	image     = be32_to_cpu(pkt_ptr->image);

	pr_info("adsp: rpc event=%d, proc_id=%d, module=%d, image=%d\n",
		event, proc_id, module_id, image);

	module = find_adsp_module_by_id(&adsp_info, module_id);
	if (!module) {
		pr_err("adsp: module %d is not supported!\n", module_id);
		rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid,
				RPC_ACCEPTSTAT_GARBAGE_ARGS);
		return;
	}

	mutex_lock(&module->lock);
	switch (event) {
	case RPC_ADSP_RTOS_MOD_READY:
		pr_info("adsp: module %s: READY\n", module->name);
		module->state = ADSP_STATE_ENABLED;
		wake_up(&module->state_wait);
		adsp_set_image(module->info, image);
		break;
	case RPC_ADSP_RTOS_MOD_DISABLE:
		pr_info("adsp: module %s: DISABLED\n", module->name);
		module->state = ADSP_STATE_DISABLED;
		wake_up(&module->state_wait);
		break;
	case RPC_ADSP_RTOS_SERVICE_RESET:
		pr_info("adsp: module %s: SERVICE_RESET\n", module->name);
		module->state = ADSP_STATE_DISABLED;
		wake_up(&module->state_wait);
		break;
	case RPC_ADSP_RTOS_CMD_SUCCESS:
		pr_info("adsp: module %s: CMD_SUCCESS\n", module->name);
		break;
	case RPC_ADSP_RTOS_CMD_FAIL:
		pr_info("adsp: module %s: CMD_FAIL\n", module->name);
		break;
	case RPC_ADSP_RTOS_DISABLE_FAIL:
		pr_info("adsp: module %s: DISABLE_FAIL\n", module->name);
		break;
	default:
		pr_info("adsp: unknown event %d\n", event);
		rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid,
					     RPC_ACCEPTSTAT_GARBAGE_ARGS);
		goto done;
	}
	rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid,
				     RPC_ACCEPTSTAT_SUCCESS);

	if (module->ops->modem_event != NULL)
		module->ops->modem_event(module->driver_data, image);
done:
	mutex_unlock(&module->lock);
	event_addr = (uint32_t *)req;
	module->ops->event(module->driver_data, EVENT_MSG_ID,
				EVENT_LEN, read_event);
}

static int handle_adsp_rtos_mtoa(struct rpc_request_hdr *req)
{
	switch (req->procedure) {
	case RPC_ADSP_RTOS_MTOA_NULL_PROC:
		rpc_send_accepted_void_reply(rpc_cb_server_client,
					     req->xid,
					     RPC_ACCEPTSTAT_SUCCESS);
		break;
#if defined(CONFIG_ARCH_MSM7227)
	case RPC_ADSP_RTOS_MODEM_TO_APP_INIT_INFO_PROC:
	case RPC_ADSP_RTOS_MODEM_TO_APP_EVENT_INFO_PROC:
#else
	case RPC_ADSP_RTOS_MODEM_TO_APP_PROC:
#endif
		handle_adsp_rtos_mtoa_app(req);
		break;
	default:
		pr_err("adsp: unknowned proc %d\n", req->procedure);
		rpc_send_accepted_void_reply(
			rpc_cb_server_client, req->xid,
			RPC_ACCEPTSTAT_PROC_UNAVAIL);
		break;
	}
	return 0;
}

/* this should be common code with rpc_servers.c */
static int adsp_rpc_thread(void *data)
{
	void *buffer;
	struct rpc_request_hdr *req;
	int rc, exit = 0;

	do {
		rc = msm_rpc_read(rpc_cb_server_client, &buffer, -1, -1);
		if (rc < 0) {
			pr_err("adsp: could not read rpc: %d\n", rc);
			break;
		}
		req = (struct rpc_request_hdr *)buffer;

		req->type = be32_to_cpu(req->type);
		req->xid = be32_to_cpu(req->xid);
		req->rpc_vers = be32_to_cpu(req->rpc_vers);
		req->prog = be32_to_cpu(req->prog);
		req->vers = be32_to_cpu(req->vers);
		req->procedure = be32_to_cpu(req->procedure);

		if (req->type != 0)
			goto bad_rpc;
		if (req->rpc_vers != 2)
			goto bad_rpc;
		if (req->prog != rpc_adsp_rtos_mtoa_prog)
			goto bad_rpc;
		if (req->vers != rpc_adsp_rtos_mtoa_vers)
			goto bad_rpc;

		handle_adsp_rtos_mtoa(req);
		kfree(buffer);
		continue;

bad_rpc:
		pr_err("adsp: bogus rpc from modem\n");
		kfree(buffer);
	} while (!exit);
	do_exit(0);
}
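
Every field of the incoming RPC header is big-endian on the wire and is byteswapped in place before the sanity checks run. A userspace sketch of the same in-place conversion for a hypothetical wire header (ntohl() standing in for be32_to_cpu()):

#include <stdint.h>
#include <arpa/inet.h>

/* A made-up big-endian wire header, converted to host order in place
 * the way adsp_rpc_thread() fixes up struct rpc_request_hdr. */
struct wire_hdr {
	uint32_t xid;
	uint32_t type;
	uint32_t vers;
	uint32_t procedure;
};

static void hdr_to_host(struct wire_hdr *h)
{
	h->xid = ntohl(h->xid);
	h->type = ntohl(h->type);
	h->vers = ntohl(h->vers);
	h->procedure = ntohl(h->procedure);
}
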
Code Example #23
File: tpm-interface.c Project: AK101111/linux
/*
 * We are about to suspend. Save the TPM state
 * so that it can be restored.
 */
int tpm_pm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct tpm_cmd_t cmd;
	int rc, try;

	u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };

	if (chip == NULL)
		return -ENODEV;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		tpm2_shutdown(chip, TPM2_SU_STATE);
		return 0;
	}

	/* for buggy tpm, flush pcrs with extend to selected dummy */
	if (tpm_suspend_pcr) {
		cmd.header.in = pcrextend_header;
		cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
		memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
		       TPM_DIGEST_SIZE);
		rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
				      "extending dummy pcr before suspend");
	}

	/* now do the actual savestate */
	for (try = 0; try < TPM_RETRY; try++) {
		cmd.header.in = savestate_header;
		rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);

		/*
		 * If the TPM indicates that it is too busy to respond to
		 * this command then retry before giving up.  It can take
		 * several seconds for this TPM to be ready.
		 *
		 * This can happen if the TPM has already been sent the
		 * SaveState command before the driver has loaded.  TCG 1.2
		 * specification states that any communication after SaveState
		 * may cause the TPM to invalidate previously saved state.
		 */
		if (rc != TPM_WARN_RETRY)
			break;
		msleep(TPM_TIMEOUT_RETRY);
	}

	if (rc)
		dev_err(&chip->dev,
			"Error (%d) sending savestate before suspend\n", rc);
	else if (try > 0)
		dev_warn(&chip->dev, "TPM savestate took %dms\n",
			 try * TPM_TIMEOUT_RETRY);

	return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);

/*
 * Resume from a power save. The BIOS already restored
 * the TPM state.
 */
int tpm_pm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip == NULL)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);

#define TPM_GETRANDOM_RESULT_SIZE	18
static struct tpm_input_header tpm_getrandom_header = {
	.tag = TPM_TAG_RQU_COMMAND,
	.length = cpu_to_be32(14),
	.ordinal = TPM_ORD_GET_RANDOM
};

/**
 * tpm_get_random() - Get random bytes from the tpm's RNG
 * @chip_num: A specific chip number for the request or TPM_ANY_NUM
 * @out: destination buffer for the random bytes
 * @max: the max number of bytes to write to @out
 *
 * Returns < 0 on error and the number of bytes read on success
 */
int tpm_get_random(u32 chip_num, u8 *out, size_t max)
{
	struct tpm_chip *chip;
	struct tpm_cmd_t tpm_cmd;
	u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
	int err, total = 0, retries = 5;
	u8 *dest = out;

	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
		return -EINVAL;

	chip = tpm_chip_find_get(chip_num);
	if (chip == NULL)
		return -ENODEV;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		err = tpm2_get_random(chip, out, max);
		tpm_put_ops(chip);
		return err;
	}

	do {
		tpm_cmd.header.in = tpm_getrandom_header;
		tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);

		err = tpm_transmit_cmd(chip, &tpm_cmd,
				   TPM_GETRANDOM_RESULT_SIZE + num_bytes,
				   "attempting get random");
		if (err)
			break;

		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
		memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);

		dest += recd;
		total += recd;
		num_bytes -= recd;
	} while (retries-- && total < max);

	tpm_put_ops(chip);
	return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);
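
A single GetRandom command may return fewer bytes than requested (recd can be short), so the loop above accumulates into dest across a bounded number of retries. The accumulate-until-satisfied pattern on its own, with a stub transport (read_chunk() is a made-up stand-in for one TPM command):

#include <stddef.h>
#include <string.h>

/* Stand-in for one TPM GetRandom exchange: yields at most 8 bytes per
 * call so the short-read path gets exercised.  Purely illustrative. */
static int read_chunk(unsigned char *dst, size_t want)
{
	size_t n = want < 8 ? want : 8;

	memset(dst, 0xab, n);	/* fake "random" data */
	return (int)n;
}

static int get_bytes(unsigned char *out, size_t max)
{
	size_t total = 0;
	int retries = 5;

	while (retries-- && total < max) {
		int got = read_chunk(out + total, max - total);

		if (got <= 0)
			break;		/* error or no progress: stop */
		total += (size_t)got;
	}
	return total ? (int)total : -1;	/* mirrors the -EIO fallback */
}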

/**
 * tpm_seal_trusted() - seal a trusted key
 * @chip_num: A specific chip number for the request or TPM_ANY_NUM
 * @options: authentication values and other options
 * @payload: the key data in clear and encrypted form
 *
 * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
 * are supported.
 */
int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
		     struct trusted_key_options *options)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpm_chip_find_get(chip_num);
	if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
		return -ENODEV;

	rc = tpm2_seal_trusted(chip, payload, options);

	tpm_put_ops(chip);
	return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);

/**
 * tpm_unseal_trusted() - unseal a trusted key
 * @chip_num: A specific chip number for the request or TPM_ANY_NUM
 * @options: authentication values and other options
 * @payload: the key data in clear and encrypted form
 *
 * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
 * are supported.
 */
int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
		       struct trusted_key_options *options)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpm_chip_find_get(chip_num);
	if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
		return -ENODEV;

	rc = tpm2_unseal_trusted(chip, payload, options);

	tpm_put_ops(chip);

	return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);

static int __init tpm_init(void)
{
	int rc;

	tpm_class = class_create(THIS_MODULE, "tpm");
	if (IS_ERR(tpm_class)) {
		pr_err("couldn't create tpm class\n");
		return PTR_ERR(tpm_class);
	}

	rc = alloc_chrdev_region(&tpm_devt, 0, TPM_NUM_DEVICES, "tpm");
	if (rc < 0) {
		pr_err("tpm: failed to allocate char dev region\n");
		class_destroy(tpm_class);
		return rc;
	}

	return 0;
}

static void __exit tpm_exit(void)
{
	idr_destroy(&dev_nums_idr);
	class_destroy(tpm_class);
	unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES);
}
Code Example #24
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = (struct mlx4_wqe_data_seg *)
						(ring->buf + ((void *) data - end));
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = (struct mlx4_wqe_data_seg *) ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 frag->size, PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}
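
When a freed descriptor spans the end of the ring, both the unmap walk and the stamping loop wrap their pointers back to ring->buf, and the stamp's ownership bit is flipped past the wrap so hardware can distinguish old entries from new. A reduced sketch of stamping a circular buffer with an owner bit (invented constants; the real code stamps every STAMP_STRIDE bytes rather than every word):

#include <stdint.h>
#include <stddef.h>

#define RING_WORDS	64		/* hypothetical ring size in words */
#define STAMP_VAL	0x7fffffffu
#define OWNER_BIT	0x80000000u

/* Stamp 'count' words starting at 'index', wrapping at the end of the
 * ring and toggling the owner bit on wrap, as mlx4_en_free_tx_desc()
 * does for freed TX descriptors. */
static void stamp_ring(uint32_t *ring, size_t index, size_t count, int owner)
{
	uint32_t stamp = STAMP_VAL | (owner ? OWNER_BIT : 0);
	size_t i;

	for (i = 0; i < count; i++) {
		ring[index++] = stamp;
		if (index == RING_WORDS) {
			index = 0;		/* wrap around */
			stamp ^= OWNER_BIT;	/* flip ownership bit */
		}
	}
}
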
Code Example #25
File: agheader.c Project: Claruarius/stblinux-2.6.37
static int
verify_set_agi(xfs_mount_t *mp, xfs_agi_t *agi, xfs_agnumber_t agno)
{
	xfs_drfsbno_t agblocks;
	int retval = 0;

	/* check common fields */

	if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC)  {
		retval = XR_AG_AGI;
		do_warn(_("bad magic # 0x%x for agi %d\n"),
			be32_to_cpu(agi->agi_magicnum), agno);

		if (!no_modify)
			agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	}

	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))  {
		retval = XR_AG_AGI;
		do_warn(_("bad version # %d for agi %d\n"),
			be32_to_cpu(agi->agi_versionnum), agno);

		if (!no_modify)
			agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	}

	if (be32_to_cpu(agi->agi_seqno) != agno)  {
		retval = XR_AG_AGI;
		do_warn(_("bad sequence # %d for agi %d\n"),
			be32_to_cpu(agi->agi_seqno), agno);

		if (!no_modify)
			agi->agi_seqno = cpu_to_be32(agno);
	}

	if (be32_to_cpu(agi->agi_length) != mp->m_sb.sb_agblocks)  {
		if (agno != mp->m_sb.sb_agcount - 1)  {
			retval = XR_AG_AGI;
			do_warn(_("bad length # %d for agi %d, should be %d\n"),
				be32_to_cpu(agi->agi_length), agno,
					mp->m_sb.sb_agblocks);
			if (!no_modify)
				agi->agi_length = 
					cpu_to_be32(mp->m_sb.sb_agblocks);
		} else  {
			agblocks = mp->m_sb.sb_dblocks -
				(xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno;

			if (be32_to_cpu(agi->agi_length) != agblocks)  {
				retval = XR_AG_AGI;
				do_warn(
	_("bad length # %d for agi %d, should be %" PRIu64 "\n"),
					be32_to_cpu(agi->agi_length),
						agno, agblocks);
				if (!no_modify)
					agi->agi_length = cpu_to_be32(agblocks);
			}
		}
	}

	/* don't check inode btree -- will be checked by caller */

	return(retval);
}
Code Example #26
File: mac.c Project: flwh/Alcatel_OT_985_kernel
int mac_partition(struct parsed_partitions *state)
{
	int slot = 1;
	Sector sect;
	unsigned char *data;
	int blk, blocks_in_map;
	unsigned secsize;
#ifdef CONFIG_PPC_PMAC
	int found_root = 0;
	int found_root_goodness = 0;
#endif
	struct mac_partition *part;
	struct mac_driver_desc *md;

	/* Get 0th block and look at the first partition map entry. */
	md = read_part_sector(state, 0, &sect);
	if (!md)
		return -1;
	if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
		put_dev_sector(sect);
		return 0;
	}
	secsize = be16_to_cpu(md->block_size);
	put_dev_sector(sect);
	data = read_part_sector(state, secsize/512, &sect);
	if (!data)
		return -1;
	part = (struct mac_partition *) (data + secsize%512);
	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
		put_dev_sector(sect);
		return 0;		/* not a MacOS disk */
	}
	printk(" [mac]");
	blocks_in_map = be32_to_cpu(part->map_count);
	for (blk = 1; blk <= blocks_in_map; ++blk) {
		int pos = blk * secsize;
		put_dev_sector(sect);
		data = read_part_sector(state, pos/512, &sect);
		if (!data)
			return -1;
		part = (struct mac_partition *) (data + pos%512);
		if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC)
			break;
		put_partition(state, slot,
			be32_to_cpu(part->start_block) * (secsize/512),
			be32_to_cpu(part->block_count) * (secsize/512));

		if (!strnicmp(part->type, "Linux_RAID", 10))
			state->parts[slot].flags = ADDPART_FLAG_RAID;
#ifdef CONFIG_PPC_PMAC
		/*
		 * If this is the first bootable partition, tell the
		 * setup code, in case it wants to make this the root.
		 */
		if (machine_is(powermac)) {
			int goodness = 0;

			mac_fix_string(part->processor, 16);
			mac_fix_string(part->name, 32);
			mac_fix_string(part->type, 32);					
		    
			if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE)
			    && strcasecmp(part->processor, "powerpc") == 0)
				goodness++;

			if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0
			    || (strnicmp(part->type, "Linux", 5) == 0
			        && strcasecmp(part->type, "Linux_swap") != 0)) {
				int i, l;

				goodness++;
				l = strlen(part->name);
				if (strcmp(part->name, "/") == 0)
					goodness++;
				for (i = 0; i <= l - 4; ++i) {
					if (strnicmp(part->name + i, "root",
						     4) == 0) {
						goodness += 2;
						break;
					}
				}
				if (strnicmp(part->name, "swap", 4) == 0)
					goodness--;
			}

			if (goodness > found_root_goodness) {
				found_root = blk;
				found_root_goodness = goodness;
			}
		}
#endif /* CONFIG_PPC_PMAC */

		++slot;
	}
#ifdef CONFIG_PPC_PMAC
	if (found_root_goodness)
		note_bootable_part(state->bdev->bd_dev, found_root,
				   found_root_goodness);
#endif

	put_dev_sector(sect);
	printk("\n");
	return 1;
}
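
Everything in the partition map is expressed in the device's native block size, so start_block and block_count are scaled by secsize/512 before being handed to put_partition(), which expects 512-byte sectors. A one-function sketch of that scaling (hypothetical signature):

#include <stdint.h>

/* Convert a (start, count) pair given in native 'secsize'-byte blocks
 * into 512-byte sectors, as mac_partition() does before registering a
 * partition.  secsize is assumed to be a multiple of 512. */
static void to_sectors(uint32_t start_block, uint32_t block_count,
		       unsigned int secsize,
		       uint64_t *start_sect, uint64_t *nr_sects)
{
	*start_sect = (uint64_t)start_block * (secsize / 512);
	*nr_sects = (uint64_t)block_count * (secsize / 512);
}

With 2048-byte blocks, for example, start_block 16 maps to sector 64.
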
Code Example #27
File: htc_hst.c Project: ARMP/android_kernel_lge_x3
/*
 * HTC Messages are handled directly here and the obtained SKB
 * is freed.
 *
 * Service messages (Data, WMI) passed to the corresponding
 * endpoint RX handlers, which have to free the SKB.
 */
void ath9k_htc_rx_msg(struct htc_target *htc_handle,
		      struct sk_buff *skb, u32 len, u8 pipe_id)
{
	struct htc_frame_hdr *htc_hdr;
	enum htc_endpoint_id epid;
	struct htc_endpoint *endpoint;
	__be16 *msg_id;

	if (!htc_handle || !skb)
		return;

	htc_hdr = (struct htc_frame_hdr *) skb->data;
	epid = htc_hdr->endpoint_id;

	if (epid >= ENDPOINT_MAX) {
		if (pipe_id != USB_REG_IN_PIPE)
			dev_kfree_skb_any(skb);
		else
			kfree_skb(skb);
		return;
	}

	if (epid == ENDPOINT0) {

		/* Handle trailer */
		if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
			if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
				/* Move past the Watchdog pattern */
				htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
		}

		/* Get the message ID */
		msg_id = (__be16 *) ((void *) htc_hdr +
				     sizeof(struct htc_frame_hdr));

		/* Now process HTC messages */
		switch (be16_to_cpu(*msg_id)) {
		case HTC_MSG_READY_ID:
			htc_process_target_rdy(htc_handle, htc_hdr);
			break;
		case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
			htc_process_conn_rsp(htc_handle, htc_hdr);
			break;
		default:
			break;
		}

		kfree_skb(skb);

	} else {
		if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
			skb_trim(skb, len - htc_hdr->control[0]);

		skb_pull(skb, sizeof(struct htc_frame_hdr));

		endpoint = &htc_handle->endpoint[epid];
		if (endpoint->ep_callbacks.rx)
			endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
						  skb, epid);
	}
}
Code Example #28
static int gfs2_get_name(struct dentry *parent, char *name,
			 struct dentry *child)
{
	struct inode *dir = parent->d_inode;
	struct inode *inode = child->d_inode;
	struct gfs2_inode *dip, *ip;
	struct get_name_filldir gnfd;
	struct gfs2_holder gh;
	u64 offset = 0;
	int error;
	struct file_ra_state f_ra = { .start = 0 };

	if (!dir)
		return -EINVAL;

	if (!S_ISDIR(dir->i_mode) || !inode)
		return -EINVAL;

	dip = GFS2_I(dir);
	ip = GFS2_I(inode);

	*name = 0;
	gnfd.inum.no_addr = ip->i_no_addr;
	gnfd.inum.no_formal_ino = ip->i_no_formal_ino;
	gnfd.name = name;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);

	gfs2_glock_dq_uninit(&gh);

	if (!error && !*name)
		error = -ENOENT;

	return error;
}

static struct dentry *gfs2_get_parent(struct dentry *child)
{
	return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
}

static struct dentry *gfs2_get_dentry(struct super_block *sb,
				      struct gfs2_inum_host *inum)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct inode *inode;

	inode = gfs2_ilookup(sb, inum->no_addr, 0);
	if (inode) {
		if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		goto out_inode;
	}

	inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
				    GFS2_BLKST_DINODE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

out_inode:
	return d_obtain_alias(inode);
}

static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	struct gfs2_inum_host this;
	__be32 *fh = (__force __be32 *)fid->raw;

	switch (fh_type) {
	case GFS2_SMALL_FH_SIZE:
	case GFS2_LARGE_FH_SIZE:
	case GFS2_OLD_FH_SIZE:
		if (fh_len < GFS2_SMALL_FH_SIZE)
			return NULL;
		this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
		this.no_formal_ino |= be32_to_cpu(fh[1]);
		this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
		this.no_addr |= be32_to_cpu(fh[3]);
		return gfs2_get_dentry(sb, &this);
	default:
		return NULL;
	}
}

static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	struct gfs2_inum_host parent;
	__be32 *fh = (__force __be32 *)fid->raw;

	switch (fh_type) {
	case GFS2_LARGE_FH_SIZE:
	case GFS2_OLD_FH_SIZE:
		if (fh_len < GFS2_LARGE_FH_SIZE)
			return NULL;
		parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
		parent.no_formal_ino |= be32_to_cpu(fh[5]);
		parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
		parent.no_addr |= be32_to_cpu(fh[7]);
		return gfs2_get_dentry(sb, &parent);
	default:
		return NULL;
	}
}
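
The export table below also registers gfs2_encode_fh, which is outside this
excerpt. A minimal sketch of the small-handle case, assuming the
dentry-based .encode_fh prototype of this kernel generation and the
big-endian word layout that gfs2_fh_to_dentry unpacks above:

static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
			  int connectable)
{
	__be32 *fh = (__force __be32 *)p;
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);

	if (*len < GFS2_SMALL_FH_SIZE)
		return 255;	/* handle buffer too small */

	fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32);
	fh[1] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
	fh[2] = cpu_to_be32(ip->i_no_addr >> 32);
	fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
	*len = GFS2_SMALL_FH_SIZE;

	/* a full implementation also packs the parent into fh[4..7]
	 * (GFS2_LARGE_FH_SIZE) when @connectable is set */
	return *len;
}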

const struct export_operations gfs2_export_ops = {
	.encode_fh = gfs2_encode_fh,
	.fh_to_dentry = gfs2_fh_to_dentry,
	.fh_to_parent = gfs2_fh_to_parent,
	.get_name = gfs2_get_name,
	.get_parent = gfs2_get_parent,
};
Code Example #29
File: t4vf_hw.c Project: BozkurTR/kernel
/**
 *	t4vf_wr_mbox_core - send a command to FW through the mailbox
 *	@adapter: the adapter
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the mailbox and waits for the
 *	FW to execute the command.  If @rpl is not %NULL it is used to store
 *	the FW's reply to the command.  The command and its optional reply
 *	are of the same length.  FW can take up to 500 ms to respond.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	int i, ms, delay_idx;
	const __be64 *p;
	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV)
		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */

			/* return value in low-order little-endian word */
			v = t4_read_reg(adapter, mbox_data);
			if (FW_CMD_RETVAL_GET(v))
				dump_mbox(adapter, "FW Error", mbox_data);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const u32 *)cmd)
					 & FW_CMD_REQUEST) == 0);
				get_mbox_rpl(adapter, rpl, size, mbox_data);
				WARN_ON((be32_to_cpu(*(u32 *)rpl)
					 & FW_CMD_REQUEST) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER(MBOX_OWNER_NONE));
			return -FW_CMD_RETVAL_GET(v);
		}
	}

	/*
	 * We timed out.  Return the error ...
	 */
	dump_mbox(adapter, "FW Timeout", mbox_data);
	return -ETIMEDOUT;
}
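
Callers in the driver normally reach this routine through thin wrappers
rather than invoking t4vf_wr_mbox_core directly. A sketch of the usual
convenience form (treat the wrapper as illustrative of the pattern):

static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
			       int size, void *rpl)
{
	/* sleeping allowed: uses the progressive-backoff path above */
	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
}
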
Code Example #30
File: psfs-format.c Project: pranjas/psfs
int format_psfs(const char *device, u_int32_t block_size, u_int64_t nr_inodes,
			u_int64_t nr_blocks, u_int32_t min_extent_length)
{
	int dev_fd=open(device,O_RDWR);
	char *fs_block_buffer;
	off_t disk_offset = 0;
	int32_t ino = -1;
	time_t tm;
	u_int64_t inodes_written = 0,inode_bmap_blocks,bmap_blocks;
	u_int64_t total_blocks_written = 0;
	u_int64_t data_bmap_block;
	u_int64_t inode_bmap_block;
	u_int64_t tmp_var;

	if (dev_fd < 0) {
		printf("Error opening device, open returned with status %d\n",errno);
		perror("FATAL:");
		return -1;
	}
	u_int64_t nr_512sectors;
	u_int32_t scaling_factor;
	if (ioctl(dev_fd,BLKGETSIZE64,&nr_512sectors) < 0 )
	{
		printf("Error getting device size, ioctl returned with status %d\n",errno);
		perror("FATAL:");
		return -1;
	}
	/* BLKGETSIZE64 reports the size in bytes; convert to 512-byte sectors. */
	nr_512sectors/=KERNEL_SECTOR_SIZE;
	printf("Total 512 sectors on disk are %llu\n",nr_512sectors);
	/*
	 *Check if default values are to be used.
	 */
	if (!block_size)
		block_size = PSFS_DEFAULT_BLKSIZE;

	scaling_factor=block_size/KERNEL_SECTOR_SIZE;
	printf(PSFS_DBG_VAR("%016llX \n",scaling_factor));
	if (!nr_blocks)
		nr_blocks = (nr_512sectors)/scaling_factor;
	printf (PSFS_DBG_VAR("%lu \n",nr_blocks));

	/*
	 *Simple heuristic: number of inodes = 10% of the total number of blocks.
	 */
	if (!nr_inodes)
		nr_inodes = nr_blocks/10;
	if (!min_extent_length)
		min_extent_length=PSFS_DEFAULT_EXTENT_LEN;

	struct psfs_super_block super;
	struct psfs_inode inode;
	memset(&inode,0,sizeof(inode));
	fs_block_buffer = calloc(1,block_size);
	if (!fs_block_buffer) {
		printf("Unable to allocate memory for formatting!\n");
		return -1;
	}
	super.psfs_nr_blocks = cpu_to_be64(nr_blocks);
	super.psfs_nr_inodes = cpu_to_be64(nr_inodes);
	super.psfs_boot_block = 0;/*cpu_to_be64(1);*/
	/*
	 * use the ceiling value.
	 */
	super.psfs_nr_boot_blocks = cpu_to_be32( 
			/*
			 * Number of blocks taken by psfs_inode structures.
			 * */
			(nr_inodes *sizeof(struct psfs_inode)/block_size)+
			(nr_inodes *sizeof(struct psfs_inode)%block_size?1:0)
					+
			/*
			 * Number of blocks taken by block bitmaps.
			 * */
			(nr_blocks/(block_size*8)+ (nr_blocks %(block_size*8)?1:0))+
			/*
			 * Number of blocks taken by inode bitmaps.
			 * */
			(nr_inodes/(block_size*8)+ (nr_inodes %(block_size*8)?1:0))
			 		+
			/*
			 * The super block always takes up the first block, so
			 * accommodate it as well.
			 * */
					1
			);
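	/*
	 * In total: nr_boot_blocks = blocks for the inode table + blocks for
	 * the block bitmap + blocks for the inode bitmap + 1 super block.
	 * Each term rounds up by hand via n/d + (n%d ? 1 : 0); for example,
	 * with 4096-byte blocks and (say) 128-byte inodes, 1000 inodes
	 * occupy ceil(128000/4096) = 32 blocks.
	 */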
	super.psfs_min_extent_length = cpu_to_be32(min_extent_length);
	super.psfs_super_flags = 0;
	super.psfs_magic = cpu_to_be32(PSFS_MAGIC);
	super.psfs_block_size = cpu_to_be32(block_size);
	memcpy(fs_block_buffer,&super,sizeof(super));
	/*
	 * Write the super block. The super block also takes up one whole FS block
	 */
	if (write(dev_fd,fs_block_buffer,block_size) < 0)
	{
		perror("FATAL Error writing super block:\n");
		return -1;
	}
	total_blocks_written++; /* increment total blocks written.*/
	/*
	 * Write the inodes right after the super block's block. So inodes are always
	 * in a fixed location, i.e. right after the super block's block.
	 */
	while (inodes_written<nr_inodes) {
		int buffer_left = block_size;
		int buffer_inodes = 0;
		memset(fs_block_buffer,0,block_size);
		while (buffer_left>=sizeof(inode) && inodes_written < nr_inodes) {
			memcpy(fs_block_buffer+sizeof(inode)*buffer_inodes,&inode,sizeof(inode));
			buffer_left -= sizeof(inode);
			buffer_inodes++;
			inodes_written++;
		}
		if (write(dev_fd,fs_block_buffer,block_size) < 0)
		{
			perror("FATAL Error while writing inodes:");
			return -1;
		}
		total_blocks_written++; /* increment total blocks written.*/
	}
	inode_bmap_block = total_blocks_written;/*We save the block number of inode bmap*/
	/*
	 * Write the inode's bitmap. Figure out how many FS blocks are required and just
	 * zero them out.
	 */

	 /*Reusing the inodes_written variable here.*/
	 inodes_written = 0;
	 inode_bmap_blocks = (nr_inodes/(block_size*8)+ (nr_inodes %(block_size*8)?1:0));
	 memset(fs_block_buffer,0,block_size);
	 while (inodes_written < inode_bmap_blocks) {
	 	if (write(dev_fd,fs_block_buffer,block_size) < 0) {
			perror("FATAL Error while writing inode bitmap");
			return -1;
		}
		inodes_written++;
		total_blocks_written++; /* increment total blocks written.*/
	 }
	 /*
	  * By now the super block, the inode blocks and the inode bitmap
	  * have been written.
	  * Record the next block address here: we are about to write the
	  * block bitmap, and we will need to come back and mark the blocks
	  * allocated so far once that is done.
	  */
	 data_bmap_block = total_blocks_written; /*We save the block number for bmap*/
	 bmap_blocks = nr_blocks/(block_size*8)+ (nr_blocks %(block_size*8)?1:0);
	 printf(PSFS_DBG_VAR("%llu \n",bmap_blocks));
	 memset(fs_block_buffer,0,block_size); /*Zero out the block for bitmap*/
	 tmp_var=bmap_blocks;
	 while (tmp_var) {
	 	if (write(dev_fd,fs_block_buffer,block_size) < 0) {
			perror("FATAL Error while writing block bitmap");
			return -1;
		}
		tmp_var--;
		total_blocks_written++;
	 }
	 /*
	  * Modify the bitmap for blocks allocated.
	  * */
	tmp_var = total_blocks_written;
	printf (PSFS_DBG_VAR("% llu\n",total_blocks_written));
	u_int64_t blocks_used = 0;
	memset(fs_block_buffer,0,block_size);
	printf(PSFS_DBG_VAR("%llu \n",tmp_var));
	while (tmp_var && (blocks_used < bmap_blocks)) {
#ifdef PSFS_DEBUG
		int64_t block_nr_alloced;
		if ( (block_nr_alloced=alloc_bmap(fs_block_buffer,block_size)) < 0) {
#else
		if (alloc_bmap(fs_block_buffer,block_size) < 0) {
#endif
			/*
			 * Write this bmap first to its position.
			 * */
			if (llseek(dev_fd,(data_bmap_block+blocks_used)*block_size,SEEK_SET) < 0)
			{
				perror("FATAL Error while seeking in device:");
				return -1;
			}
			if (write(dev_fd,fs_block_buffer,block_size) < 0)
			{
				perror("FATAL Error writing back block bitmap:");
				return -1;
			}
			blocks_used++;
			memset(fs_block_buffer,0,block_size); /*Clean the slate for next bmap block*/
		}
		else {
			tmp_var--;
#ifdef PSFS_DEBUG
			printf ("Allocated block number = %llu\n",block_nr_alloced);
#endif
		}
	}
	/*
	 * Check that we were actually able to update the bitmap, i.e. that all
	 * bitmap allocations succeeded. The last bitmap block still has to be
	 * written to disk.
	 * */
	if(tmp_var)
	{
		printf(PSFS_DBG_VAR("%llu \n",tmp_var));
		printf("FATAL Error, block allocation failed!!! NOT ENOUGH BLOCKS!!\n");
		return -1;
	}
	/*
	 * Seek explicitly before writing the last (partially filled) bitmap
	 * block back: if no bitmap block filled up in the loop above, the
	 * file offset still points past the zeroed bitmap area rather than
	 * at the block we need to overwrite.
	 */
	if (llseek(dev_fd,(data_bmap_block+blocks_used)*block_size,SEEK_SET) < 0)
	{
		perror("FATAL Error while seeking in device:");
		return -1;
	}
	if (write(dev_fd,fs_block_buffer,block_size) < 0)
	{
		perror("FATAL Error writing back block bitmap:");
		return -1;
	}
	/*
	 * Now we've set all the blocks we've taken. Setup the root directory
	 * and allocate it an extent and an inode.
	 * @blocks_used :The number of bitmap blocks used.
	 * @fs_block_buffer: The last bitmap block that was written to disk.
	 * */

	struct psfs_inode *root=&inode;
	memset(root,0,sizeof(*root));
	
	if (alloc_psfs_extent (&root->psfs_extent[0],min_extent_length*500,fs_block_buffer,block_size) < 0) {
		printf("Unable to allocate extent for root directory!!!\n");
		return -1;
	}
	root->psfs_extent[0].block_no+=blocks_used*(block_size * 8);
	/*
	 * Get an inode and set its bit in the inode bitmap; the extent
	 * grabbed above will be owned by the root directory's inode.
	 * */

	if (llseek(dev_fd, (inode_bmap_block)*block_size,SEEK_SET) < 0) {
		perror("FATAL Error: Unable to seek into device for root directory!!!\n");
		return -1;
	}
	/*
	 * There are no inodes allocated yet so we don't need to read from device.
	 * */
	memset(fs_block_buffer,0,block_size);
	if ( (ino = alloc_bmap(fs_block_buffer,block_size)) < 0) {
		perror("FATAL Error: Unable to allocate an inode from bitmap for root directory!\n");
		return -1;
	}
	/*
	 *Write the inode bitmap block.
	 */
	 if (write(dev_fd,fs_block_buffer,block_size) < 0) {
	 	perror("FATAL Error: While writing bitmap block...\n");
		return -1;
	 }
	 
	 /*
	 *Finally, write the directory entries "." and ".." for the root
	 *directory into the extent just created.
	 */
	struct psfs_dir_entry dirent_dot,dirent_dotdot;
	dirent_dot.inode_nr = dirent_dotdot.inode_nr = cpu_to_be32(ino); /* root->inode_nr is not filled in until later */
	dirent_dot.flags = dirent_dotdot.flags = cpu_to_be16(PSFS_ROOT_DIR|PSFS_NON_REM|PSFS_DIR);

	dirent_dot.name_len = 1;
	dirent_dotdot.name_len = 2;
	dirent_dot.name[0]=dirent_dotdot.name[0]='.';
	dirent_dotdot.name[1]='.';
	
	dirent_dot.rec_len = cpu_to_be16(PSFS_MIN_DIRENT_SIZE + 1);
	dirent_dotdot.rec_len = cpu_to_be16(PSFS_MIN_DIRENT_SIZE+2);
	memset(fs_block_buffer,0,block_size);
	root->size = cpu_to_be64(be16_to_cpu(dirent_dot.rec_len)+be16_to_cpu(dirent_dotdot.rec_len));
	root->inode_nr = cpu_to_be32(ino);
	printf(PSFS_DBG_VAR("=%x\n",root->psfs_extent[0].block_no));
	root->psfs_extent[0].block_no = cpu_to_be32(root->psfs_extent[0].block_no/*+be32_to_cpu(super.psfs_nr_boot_blocks)*/);
	root->psfs_extent[0].length = cpu_to_be32(root->psfs_extent[0].length);
	root->flags = (PSFS_NON_REM|PSFS_DIR|PSFS_ROOT_DIR);
	printf(PSFS_DBG_VAR("%x\n",root->flags));
	root->flags = cpu_to_be16(root->flags);
	root->a_time = root->c_time = root->m_time = cpu_to_be32(time(&tm));
	root->owner = cpu_to_be16(getuid() & 0x0ffff);
	root->type = cpu_to_be32(S_IFDIR|0755);
	memcpy(fs_block_buffer+inode_offset_from_ino(ino,&super),root,sizeof(*root));
	
	printf(PSFS_DBG_VAR("%016llX\n",root->size));
	printf(PSFS_DBG_VAR("=%x \n",root->flags));
	printf(PSFS_DBG_VAR("=%x \n",root->psfs_extent[0].block_no));
	printf(PSFS_DBG_VAR("=%x \n",root->psfs_extent[0].length));


	/*
	 *Seek to the disk block that holds this inode. Since inode blocks begin
	 *right after the first (super) block, add 1 to the block number returned
	 *by inode_block_from_ino.
	 */
	if (llseek(dev_fd,(inode_block_from_ino(ino,&super)+1)*block_size,SEEK_SET) < 0) {
		perror("FATAL Error: While seeking into device\n");
		return -1;
	}
	if (write(dev_fd,fs_block_buffer,block_size) < 0) {
		perror("FATAL Error: While writing root inode in inode block\n");
		return -1;
	}
	
	if (llseek(dev_fd,(be32_to_cpu(root->psfs_extent[0].block_no))*block_size,SEEK_SET) < 0) {
	 	perror("FATAL Error: While seeking into device\n");
		return -1;
	}
	if (write(dev_fd,&dirent_dot,be16_to_cpu(dirent_dot.rec_len)) < 0) {
	 	perror("FATAL Error: While writing dirent . \n");
		return -1;
	}
	if (write(dev_fd,&dirent_dotdot,be16_to_cpu(dirent_dotdot.rec_len)) <0) {
	 	perror("FATAL Error: While writing dirent ..\n");
		return -1;
	}
	printf(PSFS_DBG_VAR("%llX\n",super.psfs_nr_blocks));
	printf(PSFS_DBG_VAR("%llX\n",super.psfs_nr_inodes));
	printf(PSFS_DBG_VAR("%X\n",super.psfs_boot_block));
	printf(PSFS_DBG_VAR("%X\n",super.psfs_nr_boot_blocks));
	printf(PSFS_DBG_VAR("%X\n",super.psfs_min_extent_length));
	printf(PSFS_DBG_VAR("%X\n",super.psfs_super_flags));
	printf(PSFS_DBG_VAR("%X\n",super.psfs_magic));
	printf(PSFS_DBG_VAR("%X\n",super.psfs_block_size));
	return 0;
}
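
The hand-rolled ceilings in format_psfs could be centralized; a hedged
refactoring sketch (psfs_ceil_div is not part of the original source):

static inline u_int64_t psfs_ceil_div(u_int64_t n, u_int64_t d)
{
	/* round n/d up; equivalent to n/d + (n%d ? 1 : 0) for d > 0 */
	return (n + d - 1) / d;
}

With it, the boot-block count above reduces to
psfs_ceil_div(nr_inodes * sizeof(struct psfs_inode), block_size) +
psfs_ceil_div(nr_blocks, block_size * 8) +
psfs_ceil_div(nr_inodes, block_size * 8) + 1.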

int main(int argc,char *argv[])
{
	int32_t block_size=0,extent_length=0;
	int64_t nr_blocks=0,nr_inodes=0;
	extern int optind;
	char *strtol_ptr;
	int c;
	optind=2; /*The first is the device name, next comes options.*/
	if(argc<2)
	{
		printf("Usage %s <device_file> [-b block_size] [-i nr_inodes] [-N nr_blocks] [-L min extent length]\n",__progname);
		exit(EXIT_FAILURE);
	}
	while ( (c = getopt(argc,argv,OPTSTRING)) != -1) {
		switch (c) {
			case 'b':
				if ( (block_size = (int32_t)strtol(optarg,&strtol_ptr,10)) < 0 ) {
					printf("Invalid block_size value");
					exit(EXIT_FAILURE);
				}
				/*
				 *Block size must be a multiple of 4KB.
				 */
				if( (block_size & 0x0fff) ) {
					printf("Block size must"
						" be a multiple of 4KB\n");
					exit(EXIT_FAILURE);
				}
				break;
			case 'i':
				if ( (nr_inodes = (int64_t)strtoll(optarg,&strtol_ptr,10)) < 0) {
					printf("Invalid value used for number of inodes\n");
					exit(EXIT_FAILURE);
				}
				break;
			case 'N':
				if ( (nr_blocks = (int64_t)strtoll(optarg,&strtol_ptr,10)) < 0) {
					printf("Invalid value used for number of blocks\n");
					exit(EXIT_FAILURE);
				}
				break;
			case 'L':
				if ( (extent_length = (int32_t)strtol(optarg,&strtol_ptr,10)) < 0) {
					printf("Invalid value used for minimum extent length\n");
					exit(EXIT_FAILURE);
				}
				if (extent_length < 4)
				{
					printf("Minimum extent length must be 4\n");
					exit(EXIT_FAILURE);
				}
				break;
			default:
				printf("Ignoring unknown option %s and continuing...\n",optarg);
				break;
		}
	}
	if (format_psfs(argv[1],block_size,nr_inodes,nr_blocks,extent_length) < 0) {
		printf("Error in formatting device %s\n",argv[1]);
		exit(EXIT_FAILURE);
	}
	return 0;
}
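
For example, assuming the tool is built as mkfs.psfs, formatting a device
with 4 KiB blocks and otherwise default parameters would look like this
(the binary name and device path are illustrative):

	mkfs.psfs /dev/sdb -b 4096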