int fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
		u32 wrap_around, u32 spacing)
{
	u32 resp_len;
	u16 curr_frq, next_frq, last_frq;
	u16 payload, int_reason, intr_flag;
	u16 offset, space_idx;
	unsigned long timeleft;
	int ret;

	/* Set the channel spacing before starting the seek */
	ret = fm_rx_set_channel_spacing(fmdev, spacing);
	if (ret < 0) {
		fmerr("Failed to set channel spacing\n");
		return ret;
	}

	/* Read the current frequency index from the chip */
	ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL,
			sizeof(curr_frq), &curr_frq, &resp_len);
	if (ret < 0)
		return ret;

	curr_frq = be16_to_cpu(curr_frq);
	last_frq = (fmdev->rx.region.top_freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	/* Compute the offset so we stay aligned to the channel spacing */
	space_idx = fmdev->rx.region.chanl_space / FM_FREQ_MUL;
	offset = curr_frq % space_idx;

	next_frq = seek_upward ? curr_frq + space_idx :	/* seek up */
				curr_frq - space_idx;	/* seek down */

	if ((short)next_frq < 0)
		next_frq = last_frq - offset;
	else if (next_frq > last_frq)
		next_frq = 0 + offset;

again:
	/* Tune to the computed next frequency */
	payload = next_frq;
	ret = fmc_send_cmd(fmdev, FREQ_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Set the seek direction */
	payload = (seek_upward ? FM_SEARCH_DIRECTION_UP : FM_SEARCH_DIRECTION_DOWN);
	ret = fmc_send_cmd(fmdev, SEARCH_DIR_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Read the flag register to clear any pending interrupts */
	ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2, NULL, NULL);
	if (ret < 0)
		return ret;

	/* Enable FR (tune ended) and BL (band limit) interrupts */
	intr_flag = fmdev->irq_info.mask;
	fmdev->irq_info.mask = (FM_FR_EVENT | FM_BL_EVENT);
	payload = fmdev->irq_info.mask;
	ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Start the autonomous search */
	payload = FM_TUNER_AUTONOMOUS_SEARCH_MODE;
	ret = fmc_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Wait for the tune-ended or band-limit-reached interrupt */
	init_completion(&fmdev->maintask_comp);
	timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
			FM_DRV_RX_SEEK_TIMEOUT);
	if (!timeleft) {
		fmerr("Timeout (%d sec), didn't get tune-ended interrupt\n",
			   jiffies_to_msecs(FM_DRV_RX_SEEK_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}

	int_reason = fmdev->irq_info.flag & (FM_TUNE_COMPLETE | FM_BAND_LIMIT);

	/* Restore the default interrupt mask */
	fmdev->irq_info.mask = intr_flag;
	payload = fmdev->irq_info.mask;
	ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	if (int_reason & FM_BL_EVENT) {
		if (wrap_around == 0) {
			fmdev->rx.freq = seek_upward ?
				fmdev->rx.region.top_freq :
				fmdev->rx.region.bot_freq;
		} else {
			fmdev->rx.freq = seek_upward ?
				fmdev->rx.region.bot_freq :
				fmdev->rx.region.top_freq;
			/* Band limit reached: wrap to the other end and retry */
			next_frq = (fmdev->rx.freq -
					fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
			goto again;
		}
	} else {
		/* Seek ended: read the frequency we actually landed on */
		ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, 2,
				&curr_frq, &resp_len);
		if (ret < 0)
			return ret;

		curr_frq = be16_to_cpu(curr_frq);
		fmdev->rx.freq = (fmdev->rx.region.bot_freq +
				((u32)curr_frq * FM_FREQ_MUL));
	}
	/* The station changed; reset cached RDS and station data */
	fm_rx_reset_rds_cache(fmdev);
	fm_rx_reset_station_info(fmdev);

	return ret;
}
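
/*
 * Usage sketch (hypothetical caller; the spacing constant's name is an
 * assumption, not taken from the original source): seek one station
 * upward, wrapping around at the band limit. In the TI driver this path
 * is normally reached via the V4L2 hardware-seek ioctl.
 */
static int example_seek_up(struct fmdev *fmdev)
{
	return fm_rx_seek(fmdev, 1 /* seek upward */, 1 /* wrap around */,
			  FM_CHANNEL_SPACING_100KHZ /* assumed constant */);
}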
Example 2
STATIC int
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
    attrlist_cursor_kern_t *cursor;
    xfs_attr_leafblock_t *leaf;
    xfs_da_intnode_t *node;
    struct xfs_attr3_icleaf_hdr leafhdr;
    struct xfs_da3_icnode_hdr nodehdr;
    struct xfs_da_node_entry *btree;
    int error, i;
    struct xfs_buf *bp;
    struct xfs_inode	*dp = context->dp;

    trace_xfs_attr_node_list(context);

    cursor = context->cursor;
    cursor->initted = 1;

    /*
     * Do all sorts of validation on the passed-in cursor structure.
     * If anything is amiss, ignore the cursor and look up the hashval
     * starting from the btree root.
     */
    bp = NULL;
    if (cursor->blkno > 0) {
        error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
                                  &bp, XFS_ATTR_FORK);
        if ((error != 0) && (error != EFSCORRUPTED))
            return error;
        if (bp) {
            struct xfs_attr_leaf_entry *entries;

            node = bp->b_addr;
            switch (be16_to_cpu(node->hdr.info.magic)) {
            case XFS_DA_NODE_MAGIC:
            case XFS_DA3_NODE_MAGIC:
                trace_xfs_attr_list_wrong_blk(context);
                xfs_trans_brelse(NULL, bp);
                bp = NULL;
                break;
            case XFS_ATTR_LEAF_MAGIC:
            case XFS_ATTR3_LEAF_MAGIC:
                leaf = bp->b_addr;
                xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
                entries = xfs_attr3_leaf_entryp(leaf);
                if (cursor->hashval > be32_to_cpu(
                            entries[leafhdr.count - 1].hashval)) {
                    trace_xfs_attr_list_wrong_blk(context);
                    xfs_trans_brelse(NULL, bp);
                    bp = NULL;
                } else if (cursor->hashval <= be32_to_cpu(
                               entries[0].hashval)) {
                    trace_xfs_attr_list_wrong_blk(context);
                    xfs_trans_brelse(NULL, bp);
                    bp = NULL;
                }
                break;
            default:
                trace_xfs_attr_list_wrong_blk(context);
                xfs_trans_brelse(NULL, bp);
                bp = NULL;
            }
        }
    }

    /*
     * We did not find what we expected given the cursor's contents,
     * so we start from the top and work down based on the hash value.
     * Note that start of node block is same as start of leaf block.
     */
    if (bp == NULL) {
        cursor->blkno = 0;
        for (;;) {
            __uint16_t magic;

            error = xfs_da3_node_read(NULL, dp,
                                      cursor->blkno, -1, &bp,
                                      XFS_ATTR_FORK);
            if (error)
                return error;
            node = bp->b_addr;
            magic = be16_to_cpu(node->hdr.info.magic);
            if (magic == XFS_ATTR_LEAF_MAGIC ||
                    magic == XFS_ATTR3_LEAF_MAGIC)
                break;
            if (magic != XFS_DA_NODE_MAGIC &&
                    magic != XFS_DA3_NODE_MAGIC) {
                XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
                                     XFS_ERRLEVEL_LOW,
                                     context->dp->i_mount,
                                     node);
                xfs_trans_brelse(NULL, bp);
                return XFS_ERROR(EFSCORRUPTED);
            }

            dp->d_ops->node_hdr_from_disk(&nodehdr, node);
            btree = dp->d_ops->node_tree_p(node);
            for (i = 0; i < nodehdr.count; btree++, i++) {
                if (cursor->hashval
                        <= be32_to_cpu(btree->hashval)) {
                    cursor->blkno = be32_to_cpu(btree->before);
                    trace_xfs_attr_list_node_descend(context,
                                                     btree);
                    break;
                }
            }
            if (i == nodehdr.count) {
                xfs_trans_brelse(NULL, bp);
                return 0;
            }
            xfs_trans_brelse(NULL, bp);
        }
    }
    ASSERT(bp != NULL);

    /*
     * Roll upward through the blocks, processing each leaf block in
     * order.  As long as there is space in the result buffer, keep
     * adding the information.
     */
    for (;;) {
        leaf = bp->b_addr;
        error = xfs_attr3_leaf_list_int(bp, context);
        if (error) {
            xfs_trans_brelse(NULL, bp);
            return error;
        }
        xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
        if (context->seen_enough || leafhdr.forw == 0)
            break;
        cursor->blkno = leafhdr.forw;
        xfs_trans_brelse(NULL, bp);
        error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
        if (error)
            return error;
    }
    xfs_trans_brelse(NULL, bp);
    return 0;
}
Example 3
static int sgi_get_ntrks(struct fdisk_context *cxt)
{
	struct sgi_disklabel *sgilabel = self_disklabel(cxt);
	return be16_to_cpu(sgilabel->devparam.ntrks);
}
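
/*
 * Every example in this collection leans on be16_to_cpu(), which turns a
 * 16-bit big-endian (on-disk / on-wire) value into host byte order. As a
 * point of reference, here is a minimal user-space sketch of the same
 * conversion (hypothetical name, not the kernel helper): it is a byte
 * swap on little-endian hosts and a no-op on big-endian ones.
 */
static inline unsigned short example_be16_to_cpu(unsigned short be)
{
	const unsigned char *p = (const unsigned char *)&be;

	/* p[0] holds the most significant byte in big-endian order */
	return (unsigned short)((p[0] << 8) | p[1]);
}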
Example 4
/*
 * Add an entry to a block directory.
 */
int						/* error */
xfs_dir2_block_addname(
	xfs_da_args_t		*args)		/* directory op arguments */
{
	xfs_dir2_data_free_t	*bf;		/* bestfree table in block */
	xfs_dir2_block_t	*block;		/* directory block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	int			compact;	/* need to compact leaf ents */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* directory inode */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	int			error;		/* error return value */
	xfs_dir2_data_unused_t	*enddup=NULL;	/* unused at end of data */
	xfs_dahash_t		hash;		/* hash value of found entry */
	int			high;		/* high index for binary srch */
	int			highstale;	/* high stale index */
	int			lfloghigh=0;	/* last final leaf to log */
	int			lfloglow=0;	/* first final leaf to log */
	int			len;		/* length of the new entry */
	int			low;		/* low index for binary srch */
	int			lowstale;	/* low stale index */
	int			mid=0;		/* midpoint for binary srch */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log header */
	int			needscan;	/* need to rescan freespace */
	__be16			*tagp;		/* pointer to tag value */
	xfs_trans_t		*tp;		/* transaction structure */

	trace_xfs_dir2_block_addname(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	/*
	 * Read the (one and only) directory block into dabuf bp.
	 */
	if ((error =
	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
		return error;
	}
	ASSERT(bp != NULL);
	block = bp->data;
	/*
	 * Check the magic number, corrupted if wrong.
	 */
	if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
		XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
				     XFS_ERRLEVEL_LOW, mp, block);
		xfs_da_brelse(tp, bp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	len = xfs_dir2_data_entsize(args->namelen);
	/*
	 * Set up pointers to parts of the block.
	 */
	bf = block->hdr.bestfree;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * No stale entries?  Need space for entry and new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then can't do this add without cleaning up:
		 * the space before the first leaf entry needs to be free so it
		 * can be expanded to hold the pointer to the new entry.
		 */
		if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG)
			dup = enddup = NULL;
		/*
		 * Check out the biggest freespace and see if it's the same one.
		 */
		else {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + be16_to_cpu(bf[0].offset));
			if (dup == enddup) {
				/*
				 * It is the biggest freespace, is it too small
				 * to hold the new leaf too?
				 */
				if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) {
					/*
					 * Yes, we use the second-largest
					 * entry instead if it works.
					 */
					if (be16_to_cpu(bf[1].length) >= len)
						dup = (xfs_dir2_data_unused_t *)
						      ((char *)block +
						       be16_to_cpu(bf[1].offset));
					else
						dup = NULL;
				}
			} else {
				/*
				 * Not the same free entry,
				 * just check its length.
				 */
				if (be16_to_cpu(dup->length) < len) {
					dup = NULL;
				}
			}
		}
		compact = 0;
	}
	/*
	 * If there are stale entries we'll use one for the leaf.
	 * Is the biggest entry enough to avoid compaction?
	 */
	else if (be16_to_cpu(bf[0].length) >= len) {
		dup = (xfs_dir2_data_unused_t *)
		      ((char *)block + be16_to_cpu(bf[0].offset));
		compact = 0;
	}
	/*
	 * Will need to compact to make this work.
	 */
	else {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then the data will go where the
		 * leaf data starts now, if it works at all.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) *
			    (uint)sizeof(*blp) < len)
				dup = NULL;
		} else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len)
			dup = NULL;
		else
			dup = (xfs_dir2_data_unused_t *)blp;
		compact = 1;
	}
	/*
	 * If this isn't a real add, we're done with the buffer.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		xfs_da_brelse(tp, bp);
	/*
	 * If we don't have space for the new entry & leaf ...
	 */
	if (!dup) {
		/*
		 * Not trying to actually do anything, or don't have
		 * a space reservation: return no-space.
		 */
		if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
			return XFS_ERROR(ENOSPC);
		/*
		 * Convert to the next larger format.
		 * Then add the new entry in that format.
		 */
		error = xfs_dir2_block_to_leaf(args, bp);
		xfs_da_buf_done(bp);
		if (error)
			return error;
		return xfs_dir2_leaf_addname(args);
	}
	/*
	 * Just checking, and it would work, so say so.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		return 0;
	needlog = needscan = 0;
	/*
	 * If need to compact the leaf entries, do it now.
	 * Leave the highest-numbered stale entry stale.
	 * XXX should be the one closest to mid but mid is not yet computed.
	 */
	if (compact) {
		int	fromidx;		/* source leaf index */
		int	toidx;			/* target leaf index */

		for (fromidx = toidx = be32_to_cpu(btp->count) - 1,
			highstale = lfloghigh = -1;
		     fromidx >= 0;
		     fromidx--) {
			if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
				if (highstale == -1)
					highstale = toidx;
				else {
					if (lfloghigh == -1)
						lfloghigh = toidx;
					continue;
				}
			}
			if (fromidx < toidx)
				blp[toidx] = blp[fromidx];
			toidx--;
		}
		lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
		lfloghigh -= be32_to_cpu(btp->stale) - 1;
		be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
		xfs_dir2_data_make_free(tp, bp,
			(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
			(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
			&needlog, &needscan);
		blp += be32_to_cpu(btp->stale) - 1;
		btp->stale = cpu_to_be32(1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
			needscan = 0;
		}
	}
	/*
	 * Set leaf logging boundaries to impossible state.
	 * For the no-stale case they're set explicitly.
	 */
	else if (btp->stale) {
		lfloglow = be32_to_cpu(btp->count);
		lfloghigh = -1;
	}
	/*
	 * Find the slot that's first lower than our hash value, -1 if none.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) {
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
	}
	while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) {
		mid--;
	}
	/*
	 * No stale entries, will use enddup space to hold new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Mark the space needed for the new leaf entry, now in use.
		 */
		xfs_dir2_data_use_free(tp, bp, enddup,
			(xfs_dir2_data_aoff_t)
			((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
			 sizeof(*blp)),
			(xfs_dir2_data_aoff_t)sizeof(*blp),
			&needlog, &needscan);
		/*
		 * Update the tail (entry count).
		 */
		be32_add_cpu(&btp->count, 1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
				&needlog);
			needscan = 0;
		}
		/*
		 * Adjust pointer to the first leaf entry, we're about to move
		 * the table up one to open up space for the new leaf entry.
		 * Then adjust our index to match.
		 */
		blp--;
		mid++;
		if (mid)
			memmove(blp, &blp[1], mid * sizeof(*blp));
		lfloglow = 0;
		lfloghigh = mid;
	}
	/*
	 * Use a stale leaf for our new entry.
	 */
	else {
		for (lowstale = mid;
		     lowstale >= 0 &&
			be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
		     lowstale--)
			continue;
		for (highstale = mid + 1;
		     highstale < be32_to_cpu(btp->count) &&
			be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
			(lowstale < 0 || mid - lowstale > highstale - mid);
		     highstale++)
			continue;
		/*
		 * Move entries toward the low-numbered stale entry.
		 */
		if (lowstale >= 0 &&
		    (highstale == be32_to_cpu(btp->count) ||
		     mid - lowstale <= highstale - mid)) {
			if (mid - lowstale)
				memmove(&blp[lowstale], &blp[lowstale + 1],
					(mid - lowstale) * sizeof(*blp));
			lfloglow = MIN(lowstale, lfloglow);
			lfloghigh = MAX(mid, lfloghigh);
		}
		/*
		 * Move entries toward the high-numbered stale entry.
		 */
		else {
			ASSERT(highstale < be32_to_cpu(btp->count));
			mid++;
			if (highstale - mid)
				memmove(&blp[mid + 1], &blp[mid],
					(highstale - mid) * sizeof(*blp));
			lfloglow = MIN(mid, lfloglow);
			lfloghigh = MAX(highstale, lfloghigh);
		}
		be32_add_cpu(&btp->stale, -1);
	}
	/*
	 * Point to the new data entry.
	 */
	dep = (xfs_dir2_data_entry_t *)dup;
	/*
	 * Fill in the leaf entry.
	 */
	blp[mid].hashval = cpu_to_be32(args->hashval);
	blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
	/*
	 * Mark space for the data entry used.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
	/*
	 * Create the new data entry.
	 */
	dep->inumber = cpu_to_be64(args->inumber);
	dep->namelen = args->namelen;
	memcpy(dep->name, args->name, args->namelen);
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	/*
	 * Clean up the bestfree array and log the header, tail, and entry.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_log_entry(tp, bp, dep);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
Example 5
/*
 * Probes Micron sensors with 8 bit address and 16 bit register width
 */
static int em28xx_probe_sensor_micron(struct em28xx *dev)
{
	int ret, i;
	char *name;
	u8 reg;
	__be16 id_be;
	u16 id;

	struct i2c_client client = dev->i2c_client[dev->def_i2c_bus];

	dev->em28xx_sensor = EM28XX_NOSENSOR;
	for (i = 0; micron_sensor_addrs[i] != I2C_CLIENT_END; i++) {
		client.addr = micron_sensor_addrs[i];
		/* NOTE: i2c_smbus_read_word_data() doesn't work with BE data */
		/* Read chip ID from register 0x00 */
		reg = 0x00;
		ret = i2c_master_send(&client, &reg, 1);
		if (ret < 0) {
			if (ret != -ENXIO)
				em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
					      client.addr << 1, ret);
			continue;
		}
		ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
		if (ret < 0) {
			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
				      client.addr << 1, ret);
			continue;
		}
		id = be16_to_cpu(id_be);
		/* Read chip ID from register 0xff */
		reg = 0xff;
		ret = i2c_master_send(&client, &reg, 1);
		if (ret < 0) {
			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
				      client.addr << 1, ret);
			continue;
		}
		ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
		if (ret < 0) {
			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
				      client.addr << 1, ret);
			continue;
		}
		/* Validate chip ID to be sure we have a Micron device */
		if (id != be16_to_cpu(id_be))
			continue;
		/* Check chip ID */
		id = be16_to_cpu(id_be);
		switch (id) {
		case 0x1222:
			name = "MT9V012"; /* MI370 */ /* 640x480 */
			break;
		case 0x1229:
			name = "MT9V112"; /* 640x480 */
			break;
		case 0x1433:
			name = "MT9M011"; /* 1280x1024 */
			break;
		case 0x143a:    /* found in the ECS G200 */
			name = "MT9M111"; /* MI1310 */ /* 1280x1024 */
			dev->em28xx_sensor = EM28XX_MT9M111;
			break;
		case 0x148c:
			name = "MT9M112"; /* MI1320 */ /* 1280x1024 */
			break;
		case 0x1511:
			name = "MT9D011"; /* MI2010 */ /* 1600x1200 */
			break;
		case 0x8232:
		case 0x8243:	/* rev B */
			name = "MT9V011"; /* MI360 */ /* 640x480 */
			dev->em28xx_sensor = EM28XX_MT9V011;
			break;
		case 0x8431:
			name = "MT9M001"; /* 1280x1024 */
			dev->em28xx_sensor = EM28XX_MT9M001;
			break;
		default:
			em28xx_info("unknown Micron sensor detected: 0x%04x\n",
				    id);
			return 0;
		}

		if (dev->em28xx_sensor == EM28XX_NOSENSOR)
			em28xx_info("unsupported sensor detected: %s\n", name);
		else
			em28xx_info("sensor %s detected\n", name);

		dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
		return 0;
	}

	return -ENODEV;
}
Example 6
static int ads7846_read12_ser(struct device *dev, unsigned command)
{
	struct spi_device	*spi = to_spi_device(dev);
	struct ads7846		*ts = dev_get_drvdata(dev);
	struct ser_req		*req = kzalloc(sizeof *req, GFP_KERNEL);
	int			status;
	int			use_internal;
	struct spi_message	*m;	/* message currently being built */
	struct spi_message	*m1;	/* first message, for the submit loop */

	if (!req)
		return -ENOMEM;

	m = &req->msg[0];
	m1 = m;
	spi_message_init(m);

	/* FIXME boards with ads7846 might use external vref instead ... */
	use_internal = (ts->model == 7846);

	/* maybe turn on internal vREF, and let it settle */
	if (use_internal) {
		req->ref_on = REF_ON;
		req->xfer[0].tx_buf = &req->ref_on;
		req->xfer[0].len = 1;
		spi_message_add_tail(&req->xfer[0], m);

		req->xfer[1].rx_buf = &req->scratch;
		req->xfer[1].len = 2;

		/* for 1uF, settle for 800 usec; no cap, 100 usec.  */
		req->xfer[1].delay_usecs = ts->vref_delay_usecs;
		spi_message_add_tail(&req->xfer[1], m);

		/* start a second message for the sample itself */
		m++;
		spi_message_init(m);
	}

	/* take sample */
	req->command = (u8) command;
	req->xfer[2].tx_buf = &req->command;
	req->xfer[2].len = 1;
	spi_message_add_tail(&req->xfer[2], m);

	req->xfer[3].rx_buf = &req->sample;
	req->xfer[3].len = 2;
	spi_message_add_tail(&req->xfer[3], m);

	/* start the final message */
	m++;
	spi_message_init(m);
	/* REVISIT:  take a few more samples, and compare ... */

	/* converter in low power mode & enable PENIRQ */
	req->ref_off = PWRDOWN;
	req->xfer[4].tx_buf = &req->ref_off;
	req->xfer[4].len = 1;
	spi_message_add_tail(&req->xfer[4], m);

	req->xfer[5].rx_buf = &req->scratch;
	req->xfer[5].len = 2;
	CS_CHANGE(req->xfer[5]);
	spi_message_add_tail(&req->xfer[5], m);

	ts->irq_disabled = 1;
	disable_irq(spi->irq);
	/* submit the messages in order, stopping at the first error */
	while (m1 <= m) {
		status = spi_sync(spi, m1++);
		if (status)
			break;
	}
	ts->irq_disabled = 0;
	enable_irq(spi->irq);

	if (status == 0) {
		/* on-wire is a must-ignore bit, a BE12 value, then padding */
		status = be16_to_cpu(req->sample);
		status = status >> 3;
		status &= 0x0fff;
	}

	kfree(req);
	return status;
}
Example 7
/*
 * Convert the shortform directory to block form.
 */
int						/* error */
xfs_dir2_sf_to_block(
	xfs_da_args_t		*args)		/* operation arguments */
{
	xfs_dir2_db_t		blkno;		/* dir-relative block # (0) */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
	char			*buf;		/* sf buffer */
	int			buf_len;
	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
	xfs_inode_t		*dp;		/* incore directory inode */
	int			dummy;		/* trash */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	int			endoffset;	/* end of data objects */
	int			error;		/* error return value */
	int			i;		/* index */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to scan block freespc */
	int			newoffset;	/* offset from current entry */
	int			offset;		/* target block offset */
	xfs_dir2_sf_entry_t	*sfep;		/* sf entry pointer */
	xfs_dir2_sf_t		*sfp;		/* shortform structure */
	__be16			*tagp;		/* end of data entry */
	xfs_trans_t		*tp;		/* transaction pointer */
	struct xfs_name		name;

	trace_xfs_dir2_sf_to_block(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
	/*
	 * Bomb out if the shortform directory is way too short.
	 */
	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return XFS_ERROR(EIO);
	}
	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
	ASSERT(dp->i_df.if_u1.if_data != NULL);
	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
	ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
	/*
	 * Copy the directory into the stack buffer.
	 * Then pitch the incore inode data so we can make extents.
	 */

	buf_len = dp->i_df.if_bytes;
	buf = kmem_alloc(buf_len, KM_SLEEP);

	memcpy(buf, sfp, buf_len);
	xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
	dp->i_d.di_size = 0;
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	/*
	 * Reset pointer - old sfp is gone.
	 */
	sfp = (xfs_dir2_sf_t *)buf;
	/*
	 * Add block 0 to the inode.
	 */
	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
	if (error) {
		kmem_free(buf);
		return error;
	}
	/*
	 * Initialize the data block.
	 */
	error = xfs_dir2_data_init(args, blkno, &bp);
	if (error) {
		kmem_free(buf);
		return error;
	}
	block = bp->data;
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	/*
	 * Compute size of block "tail" area.
	 */
	i = (uint)sizeof(*btp) +
	    (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
	/*
	 * The whole thing is initialized to free by the init routine.
	 * Say we're using the leaf and tail area.
	 */
	dup = (xfs_dir2_data_unused_t *)block->u;
	needlog = needscan = 0;
	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
		&needscan);
	ASSERT(needscan == 0);
	/*
	 * Fill in the tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(sfp->hdr.count + 2);	/* ., .. */
	btp->stale = 0;
	blp = xfs_dir2_block_leaf_p(btp);
	endoffset = (uint)((char *)blp - (char *)block);
	/*
	 * Remove the freespace, we'll manage it.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		be16_to_cpu(dup->length), &needlog, &needscan);
	/*
	 * Create entry for .
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
	dep->inumber = cpu_to_be64(dp->i_ino);
	dep->namelen = 1;
	dep->name[0] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
	blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	/*
	 * Create entry for ..
	 */
	dep = (xfs_dir2_data_entry_t *)
		((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
	dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
	dep->namelen = 2;
	dep->name[0] = dep->name[1] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
	blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	offset = XFS_DIR2_DATA_FIRST_OFFSET;
	/*
	 * Loop over existing entries, stuff them in.
	 */
	i = 0;
	if (sfp->hdr.count == 0)
		sfep = NULL;
	else
		sfep = xfs_dir2_sf_firstentry(sfp);
	/*
	 * Need to preserve the existing offset values in the sf directory.
	 * Insert holes (unused entries) where necessary.
	 */
	while (offset < endoffset) {
		/*
		 * sfep is null when we reach the end of the list.
		 */
		if (sfep == NULL)
			newoffset = endoffset;
		else
			newoffset = xfs_dir2_sf_get_offset(sfep);
		/*
		 * There should be a hole here, make one.
		 */
		if (offset < newoffset) {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + offset);
			dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			dup->length = cpu_to_be16(newoffset - offset);
			*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
				((char *)dup - (char *)block));
			xfs_dir2_data_log_unused(tp, bp, dup);
			(void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
				dup, &dummy);
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/*
		 * Copy a real entry.
		 */
		dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
		dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
				xfs_dir2_sf_inumberp(sfep)));
		dep->namelen = sfep->namelen;
		memcpy(dep->name, sfep->name, dep->namelen);
		tagp = xfs_dir2_data_entry_tag_p(dep);
		*tagp = cpu_to_be16((char *)dep - (char *)block);
		xfs_dir2_data_log_entry(tp, bp, dep);
		name.name = sfep->name;
		name.len = sfep->namelen;
		blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
							hashname(&name));
		blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
						 (char *)dep - (char *)block));
		offset = (int)((char *)(tagp + 1) - (char *)block);
		if (++i == sfp->hdr.count)
			sfep = NULL;
		else
			sfep = xfs_dir2_sf_nextentry(sfp, sfep);
	}
	/* Done with the temporary buffer */
	kmem_free(buf);
	/*
	 * Sort the leaf entries by hash value.
	 */
	xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort);
	/*
	 * Log the leaf entry area and tail.
	 * Already logged the header in data_init, ignore needlog.
	 */
	ASSERT(needscan == 0);
	xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
Example 8
static void set_dcd_param_v2(struct imx_header *imxhdr, uint32_t dcd_len,
		int32_t cmd)
{
	dcd_v2_t *dcd_v2 = &imxhdr->header.hdr_v2.data.dcd_table;
	struct dcd_v2_cmd *d = gd_last_cmd;
	struct dcd_v2_cmd *d2;
	int len;

	if (!d)
		d = &dcd_v2->dcd_cmd;
	d2 = d;
	len = be16_to_cpu(d->write_dcd_command.length);
	if (len > 4)
		d2 = (struct dcd_v2_cmd *)(((char *)d) + len);

	switch (cmd) {
	case CMD_WRITE_DATA:
		if ((d->write_dcd_command.tag == DCD_WRITE_DATA_COMMAND_TAG) &&
		    (d->write_dcd_command.param == DCD_WRITE_DATA_PARAM))
			break;
		d = d2;
		d->write_dcd_command.tag = DCD_WRITE_DATA_COMMAND_TAG;
		d->write_dcd_command.length = cpu_to_be16(4);
		d->write_dcd_command.param = DCD_WRITE_DATA_PARAM;
		break;
	case CMD_WRITE_CLR_BIT:
		if ((d->write_dcd_command.tag == DCD_WRITE_DATA_COMMAND_TAG) &&
		    (d->write_dcd_command.param == DCD_WRITE_CLR_BIT_PARAM))
			break;
		d = d2;
		d->write_dcd_command.tag = DCD_WRITE_DATA_COMMAND_TAG;
		d->write_dcd_command.length = cpu_to_be16(4);
		d->write_dcd_command.param = DCD_WRITE_CLR_BIT_PARAM;
		break;
	case CMD_WRITE_SET_BIT:
		if ((d->write_dcd_command.tag == DCD_WRITE_DATA_COMMAND_TAG) &&
		    (d->write_dcd_command.param == DCD_WRITE_SET_BIT_PARAM))
			break;
		d = d2;
		d->write_dcd_command.tag = DCD_WRITE_DATA_COMMAND_TAG;
		d->write_dcd_command.length = cpu_to_be16(4);
		d->write_dcd_command.param = DCD_WRITE_SET_BIT_PARAM;
		break;
	/*
	 * The check-data command only supports one entry.
	 */
	case CMD_CHECK_BITS_SET:
		d = d2;
		d->write_dcd_command.tag = DCD_CHECK_DATA_COMMAND_TAG;
		d->write_dcd_command.length = cpu_to_be16(4);
		d->write_dcd_command.param = DCD_CHECK_BITS_SET_PARAM;
		break;
	case CMD_CHECK_BITS_CLR:
		d = d2;
		d->write_dcd_command.tag = DCD_CHECK_DATA_COMMAND_TAG;
		d->write_dcd_command.length = cpu_to_be16(4);
		d->write_dcd_command.param = DCD_CHECK_BITS_CLR_PARAM;
		break;
	default:
		break;
	}
	gd_last_cmd = d;
}
Example 9
static void print_hdr_v2(struct imx_header *imx_hdr)
{
	imx_header_v2_t *hdr_v2 = &imx_hdr->header.hdr_v2;
	flash_header_v2_t *fhdr_v2 = &hdr_v2->fhdr;
	dcd_v2_t *dcd_v2 = &hdr_v2->data.dcd_table;
	uint32_t size, version, plugin;

	plugin = hdr_v2->boot_data.plugin;
	if (!plugin) {
		size = be16_to_cpu(dcd_v2->header.length);
		if (size > (MAX_HW_CFG_SIZE_V2 * sizeof(dcd_addr_data_t))) {
			fprintf(stderr,
				"Error: Image corrupt, DCD size %d exceeds maximum %d\n",
				(uint32_t)(size / sizeof(dcd_addr_data_t)),
				MAX_HW_CFG_SIZE_V2);
			exit(EXIT_FAILURE);
		}
	}

	version = detect_imximage_version(imx_hdr);

	printf("Image Type:   Freescale IMX Boot Image\n");
	printf("Image Ver:    %x", version);
	printf("%s\n", get_table_entry_name(imximage_versions, NULL, version));
	printf("Mode:         %s\n", plugin ? "PLUGIN" : "DCD");
	if (!plugin) {
		printf("Data Size:    ");
		genimg_print_size(hdr_v2->boot_data.size);
		printf("Load Address: %08x\n", (uint32_t)fhdr_v2->boot_data_ptr);
		printf("Entry Point:  %08x\n", (uint32_t)fhdr_v2->entry);
		if (fhdr_v2->csf && (imximage_ivt_offset != UNDEFINED) &&
		    (imximage_csf_size != UNDEFINED)) {
			uint16_t dcdlen;
			int offs;

			dcdlen = hdr_v2->data.dcd_table.header.length;
			offs = (char *)&hdr_v2->data.dcd_table
				- (char *)hdr_v2;

			printf("HAB Blocks:   %08x %08x %08x\n",
			       (uint32_t)fhdr_v2->self, 0,
			       hdr_v2->boot_data.size - imximage_ivt_offset -
			       imximage_csf_size);
			printf("DCD Blocks:   00910000 %08x %08x\n",
			       offs, be16_to_cpu(dcdlen));
		}
	} else {
		imx_header_v2_t *next_hdr_v2;
		flash_header_v2_t *next_fhdr_v2;

		/*First Header*/
		printf("Plugin Data Size:     ");
		genimg_print_size(hdr_v2->boot_data.size);
		printf("Plugin Code Size:     ");
		genimg_print_size(imximage_plugin_size);
		printf("Plugin Load Address:  %08x\n", hdr_v2->boot_data.start);
		printf("Plugin Entry Point:   %08x\n", (uint32_t)fhdr_v2->entry);

		/*Second Header*/
		next_hdr_v2 = (imx_header_v2_t *)((char *)hdr_v2 +
				imximage_plugin_size);
		next_fhdr_v2 = &next_hdr_v2->fhdr;
		printf("U-Boot Data Size:     ");
		genimg_print_size(next_hdr_v2->boot_data.size);
		printf("U-Boot Load Address:  %08x\n",
		       next_hdr_v2->boot_data.start);
		printf("U-Boot Entry Point:   %08x\n",
		       (uint32_t)next_fhdr_v2->entry);
	}
}
Example 10
static inline unsigned short
read_short(const char *buf)
{
    /* NB: the cast assumes 'buf' is 2-byte aligned and ignores
     * strict-aliasing rules; see the safer variant below. */
    return be16_to_cpu(*(unsigned short *)buf);
}
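
/*
 * A sketch of an alignment-safe alternative (hypothetical helper, not part
 * of the original source, assuming memcpy() is available): copy the two
 * bytes out first, then convert. For a known two-byte size most compilers
 * reduce the memcpy to a plain load.
 */
static inline unsigned short
read_short_unaligned(const char *buf)
{
    unsigned short v;

    memcpy(&v, buf, sizeof(v));    /* no alignment requirement on buf */
    return be16_to_cpu(v);
}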
Example 11
static inline u16 flash_read16(void __iomem *addr)
{
	/* the ^ 0x2 address swizzle selects the other 16-bit half-word
	 * within each 32-bit bus word (board-specific wiring) before the
	 * big-endian value is converted to host order */
	return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
}
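Example 12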
static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	int len, err;
	char strbuf[HFSPLUS_MAX_STRLEN + 1];
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct hfsplus_readdir_data *rd;
	u16 type;

	if (filp->f_pos >= inode->i_size)
		return 0;

	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (err)
		return err;
	hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
	err = hfs_brec_find(&fd);
	if (err)
		goto out;

	switch ((u32)filp->f_pos) {
	case 0:
		/* This is completely artificial... */
		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
			goto out;
		filp->f_pos++;
		/* fall through */
	case 1:
		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			fd.entrylength);
		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
			printk(KERN_ERR "hfs: bad catalog folder thread\n");
			err = -EIO;
			goto out;
		}
		if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
			printk(KERN_ERR "hfs: truncated catalog thread\n");
			err = -EIO;
			goto out;
		}
		if (filldir(dirent, "..", 2, 1,
			    be32_to_cpu(entry.thread.parentID), DT_DIR))
			goto out;
		filp->f_pos++;
		/* fall through */
	default:
		if (filp->f_pos >= inode->i_size)
			goto out;
		err = hfs_brec_goto(&fd, filp->f_pos - 1);
		if (err)
			goto out;
	}

	for (;;) {
		if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
			printk(KERN_ERR "hfs: walked past end of dir\n");
			err = -EIO;
			goto out;
		}

		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			fd.entrylength);
		type = be16_to_cpu(entry.type);
		len = HFSPLUS_MAX_STRLEN;
		err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
		if (err)
			goto out;
		if (type == HFSPLUS_FOLDER) {
			if (fd.entrylength <
					sizeof(struct hfsplus_cat_folder)) {
				printk(KERN_ERR "hfs: small dir entry\n");
				err = -EIO;
				goto out;
			}
			if (HFSPLUS_SB(sb)->hidden_dir &&
			    HFSPLUS_SB(sb)->hidden_dir->i_ino ==
					be32_to_cpu(entry.folder.id))
				goto next;
			if (filldir(dirent, strbuf, len, filp->f_pos,
				    be32_to_cpu(entry.folder.id), DT_DIR))
				break;
		} else if (type == HFSPLUS_FILE) {
			if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
				printk(KERN_ERR "hfs: small file entry\n");
				err = -EIO;
				goto out;
			}
			if (filldir(dirent, strbuf, len, filp->f_pos,
				    be32_to_cpu(entry.file.id), DT_REG))
				break;
		} else {
			printk(KERN_ERR "hfs: bad catalog entry type\n");
			err = -EIO;
			goto out;
		}
next:
		filp->f_pos++;
		if (filp->f_pos >= inode->i_size)
			goto out;
		err = hfs_brec_goto(&fd, 1);
		if (err)
			goto out;
	}
	rd = filp->private_data;
	if (!rd) {
		rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
		if (!rd) {
			err = -ENOMEM;
			goto out;
		}
		filp->private_data = rd;
		rd->file = filp;
		list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
	}
	memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
out:
	hfs_find_exit(&fd);
	return err;
}
Example 13
File: srq.c Project: mhei/linux
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr)
{
    struct mlx4_ib_srq *srq = to_msrq(ibsrq);
    struct mlx4_wqe_srq_next_seg *next;
    struct mlx4_wqe_data_seg *scat;
    unsigned long flags;
    int err = 0;
    int nreq;
    int i;
    struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

    spin_lock_irqsave(&srq->lock, flags);
    if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
        err = -EIO;
        *bad_wr = wr;
        nreq = 0;
        goto out;
    }

    for (nreq = 0; wr; ++nreq, wr = wr->next) {
        if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
            err = -EINVAL;
            *bad_wr = wr;
            break;
        }

        if (unlikely(srq->head == srq->tail)) {
            err = -ENOMEM;
            *bad_wr = wr;
            break;
        }

        srq->wrid[srq->head] = wr->wr_id;

        next      = get_wqe(srq, srq->head);
        srq->head = be16_to_cpu(next->next_wqe_index);
        scat      = (struct mlx4_wqe_data_seg *) (next + 1);

        for (i = 0; i < wr->num_sge; ++i) {
            scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
            scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
            scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
        }

        if (i < srq->msrq.max_gs) {
            scat[i].byte_count = 0;
            scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
            scat[i].addr       = 0;
        }
    }

    if (likely(nreq)) {
        srq->wqe_ctr += nreq;

        /*
         * Make sure that descriptors are written before
         * doorbell record.
         */
        wmb();

        *srq->db.db = cpu_to_be32(srq->wqe_ctr);
    }
out:

    spin_unlock_irqrestore(&srq->lock, flags);

    return err;
}
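Example 14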
int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
{
	unsigned long timeleft;
	u16 payload, curr_frq, intr_flag;
	u32 curr_frq_in_khz;
	u32 resp_len;
	int ret;

	if (freq < fmdev->rx.region.bot_freq || freq > fmdev->rx.region.top_freq) {
		fmerr("Invalid frequency %d\n", freq);
		return -EINVAL;
	}

	/* Enable audio output (I2S and analog) */
	payload = FM_RX_AUDIO_ENABLE_I2S_AND_ANALOG;
	ret = fmc_send_cmd(fmdev, AUDIO_ENABLE_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Set automatic HI/LO side injection selection */
	payload = FM_RX_IFFREQ_HILO_AUTOMATIC;
	ret = fmc_send_cmd(fmdev, HILO_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Write the frequency as an index relative to the band bottom */
	payload = (freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
	ret = fmc_send_cmd(fmdev, FREQ_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Read the flag register to clear any pending interrupts */
	ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2, NULL, NULL);
	if (ret < 0)
		return ret;

	/* Enable FR (tune ended) and BL (band limit) interrupts */
	intr_flag = fmdev->irq_info.mask;
	fmdev->irq_info.mask = (FM_FR_EVENT | FM_BL_EVENT);
	payload = fmdev->irq_info.mask;
	ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* Start tuning in preset mode */
	payload = FM_TUNER_PRESET_MODE;
	ret = fmc_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		goto exit;

	/* Wait for the tune-ended interrupt */
	init_completion(&fmdev->maintask_comp);
	timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
			FM_DRV_TX_TIMEOUT);
	if (!timeleft) {
		fmerr("Timeout (%d sec), didn't get tune-ended interrupt\n",
			   jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		ret = -ETIMEDOUT;
		goto exit;
	}

	/* Read back the frequency the chip actually tuned to */
	ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, 2, &curr_frq, &resp_len);
	if (ret < 0)
		goto exit;

	curr_frq = be16_to_cpu(curr_frq);
	curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));

	if (curr_frq_in_khz != freq) {
		pr_info("Frequency is set to (%d) but requested freq is (%d)\n",
			curr_frq_in_khz, freq);
	}

	/* Update the driver state with the tuned frequency */
	fmdev->rx.freq = curr_frq_in_khz;
exit:
	/* Restore the default interrupt mask */
	fmdev->irq_info.mask = intr_flag;
	payload = fmdev->irq_info.mask;
	ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	/* The station changed; reset cached RDS and station data */
	fm_rx_reset_rds_cache(fmdev);
	fm_rx_reset_station_info(fmdev);

	return ret;
}
Example 15
File: iomap.c Project: 274914765/C
unsigned int ioread16be(void __iomem *addr)
{
	/* raw 16-bit MMIO read, converted from big-endian to host order */
	return be16_to_cpu(__raw_readw(addr));
}
Example 16
/*
 * Upload a microcode to the I-RAM at a specific address.
 *
 * See docs/README.qe_firmware for information on QE microcode uploading.
 *
 * Currently, only version 1 is supported, so the 'version' field must be
 * set to 1.
 *
 * The SOC model and revision are not validated, they are only displayed for
 * informational purposes.
 *
 * 'calc_size' is the calculated size, in bytes, of the firmware structure and
 * all of the microcode structures, minus the CRC.
 *
 * 'length' is the size that the structure says it is, including the CRC.
 */
int qe_upload_firmware(const struct qe_firmware *firmware)
{
	unsigned int i;
	unsigned int j;
	u32 crc;
	size_t calc_size = sizeof(struct qe_firmware);
	size_t length;
	const struct qe_header *hdr;
#ifdef CONFIG_DEEP_SLEEP
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif
	if (!firmware) {
		printf("Invalid address\n");
		return -EINVAL;
	}

	hdr = &firmware->header;
	length = be32_to_cpu(hdr->length);

	/* Check the magic */
	if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
	    (hdr->magic[2] != 'F')) {
		printf("QE microcode not found\n");
#ifdef CONFIG_DEEP_SLEEP
		setbits_be32(&gur->devdisr, MPC85xx_DEVDISR_QE_DISABLE);
#endif
		return -EPERM;
	}

	/* Check the version */
	if (hdr->version != 1) {
		printf("Unsupported version\n");
		return -EPERM;
	}

	/* Validate some of the fields */
	if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
		printf("Invalid data\n");
		return -EINVAL;
	}

	/* Validate the length and check if there's a CRC */
	calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);

	for (i = 0; i < firmware->count; i++)
		/*
		 * For situations where the second RISC uses the same microcode
		 * as the first, the 'code_offset' and 'count' fields will be
		 * zero, so it's okay to add those.
		 */
		calc_size += sizeof(u32) *
			be32_to_cpu(firmware->microcode[i].count);

	/* Validate the length */
	if (length != calc_size + sizeof(u32)) {
		printf("Invalid length\n");
		return -EPERM;
	}

	/*
	 * Validate the CRC.  We would normally call crc32_no_comp(), but that
	 * function isn't available unless you turn on JFFS support.
	 */
	crc = be32_to_cpu(*(u32 *)((void *)firmware + calc_size));
	if (crc != (crc32(-1, (const void *) firmware, calc_size) ^ -1)) {
		printf("Firmware CRC is invalid\n");
		return -EIO;
	}

	/*
	 * If the microcode calls for it, split the I-RAM.
	 */
	if (!firmware->split) {
		out_be16(&qe_immr->cp.cercr,
			in_be16(&qe_immr->cp.cercr) | QE_CP_CERCR_CIR);
	}

	if (firmware->soc.model)
		printf("Firmware '%s' for %u V%u.%u\n",
			firmware->id, be16_to_cpu(firmware->soc.model),
			firmware->soc.major, firmware->soc.minor);
	else
		printf("Firmware '%s'\n", firmware->id);

	/*
	 * The QE only supports one microcode per RISC, so clear out all the
	 * saved microcode information and put in the new.
	 */
	memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
	strcpy(qe_firmware_info.id, (char *)firmware->id);
	qe_firmware_info.extended_modes = firmware->extended_modes;
	memcpy(qe_firmware_info.vtraps, firmware->vtraps,
		sizeof(firmware->vtraps));
	qe_firmware_uploaded = 1;

	/* Loop through each microcode. */
	for (i = 0; i < firmware->count; i++) {
		const struct qe_microcode *ucode = &firmware->microcode[i];

		/* Upload a microcode if it's present */
		if (ucode->code_offset)
			qe_upload_microcode(firmware, ucode);

		/* Program the traps for this processor */
		for (j = 0; j < 16; j++) {
			u32 trap = be32_to_cpu(ucode->traps[j]);

			if (trap)
				out_be32(&qe_immr->rsp[i].tibcr[j], trap);
		}

		/* Enable traps */
		out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
	}

	return 0;
}
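Example 17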
/*
 * Invalidate all of the "remote" value regions pointed to by a particular
 * leaf block.
 * Note that we must release the lock on the buffer so that we are not
 * caught holding something that the logging code wants to flush to disk.
 */
STATIC int
xfs_attr3_leaf_inactive(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp,
	struct xfs_buf		*bp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_attr_inactive_list *list;
	struct xfs_attr_inactive_list *lp;
	int			error;
	int			count;
	int			size;
	int			tmp;
	int			i;

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);

	/*
	 * Count the number of "remote" value extents.
	 */
	count = 0;
	entry = xfs_attr3_leaf_entryp(leaf);
	for (i = 0; i < ichdr.count; entry++, i++) {
		if (be16_to_cpu(entry->nameidx) &&
		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			if (name_rmt->valueblk)
				count++;
		}
	}

	/*
	 * If there are no "remote" values, we're done.
	 */
	if (count == 0) {
		xfs_trans_brelse(*trans, bp);
		return 0;
	}

	/*
	 * Allocate storage for a list of all the "remote" value extents.
	 */
	size = count * sizeof(xfs_attr_inactive_list_t);
	list = kmem_alloc(size, KM_SLEEP);

	/*
	 * Identify each of the "remote" value extents.
	 */
	lp = list;
	entry = xfs_attr3_leaf_entryp(leaf);
	for (i = 0; i < ichdr.count; entry++, i++) {
		if (be16_to_cpu(entry->nameidx) &&
		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			if (name_rmt->valueblk) {
				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
				lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
						    be32_to_cpu(name_rmt->valuelen));
				lp++;
			}
		}
	}
	xfs_trans_brelse(*trans, bp);	/* unlock for trans. in freextent() */

	/*
	 * Invalidate each of the "remote" value extents.
	 */
	error = 0;
	for (lp = list, i = 0; i < count; i++, lp++) {
		tmp = xfs_attr3_leaf_freextent(trans, dp,
				lp->valueblk, lp->valuelen);

		if (error == 0)
			error = tmp;	/* save only the 1st errno */
	}

	kmem_free(list);
	return error;
}
Example 18
static uint16_t
blobmsg_namelen(const struct blobmsg_hdr *hdr)
{
	return be16_to_cpu(hdr->namelen);
}
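
/*
 * Usage sketch (hypothetical helper, not part of libubox; assumes the
 * blobmsg layout where the name bytes directly follow the big-endian
 * length field): copy the attribute name into a NUL-terminated buffer.
 */
static void
blobmsg_copy_name(const struct blobmsg_hdr *hdr, char *out, size_t outlen)
{
	uint16_t len = blobmsg_namelen(hdr);

	if (!outlen)
		return;
	if (len >= outlen)
		len = outlen - 1;	/* truncate to fit */
	memcpy(out, hdr->name, len);
	out[len] = '\0';
}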
Example 19
BOOLEAN SendImageDataPacket(PMINIPORT_ADAPTER Adapter, unsigned short uiCmdId)
{
	PHW_PACKET_HEADER pPktHdr;
	UCHAR pTxPkt[MAX_IMAGE_DATA_MSG_LENGTH];
	UINT uiPayloadSize;
	UINT uiBufOffset;
	UINT uiImageLength;
	PIMAGE_DATA_PAYLOAD pImageDataPayload;
	PWIMAX_MESSAGE_HEADER pWibroMsgHdr;
	int status;
	UINT dwTxSize;

	pPktHdr = (PHW_PACKET_HEADER)pTxPkt;
	pPktHdr->Id0 = 'W';
	pPktHdr->Id1 = 'C';	

	uiBufOffset = sizeof(HW_PACKET_HEADER);
	pWibroMsgHdr = (PWIMAX_MESSAGE_HEADER)(pTxPkt + uiBufOffset);
	/* a 16-bit byte swap is its own inverse, so be16_to_cpu() here
	 * produces big-endian wire order exactly as cpu_to_be16() would */
	pWibroMsgHdr->MsgType = be16_to_cpu(ETHERTYPE_DL);
	pWibroMsgHdr->MsgID = be16_to_cpu(uiCmdId);

	if(g_stWiMAXImage.uiOffset < (g_stWiMAXImage.uiSize - MAX_IMAGE_DATA_LENGTH))
		uiImageLength = MAX_IMAGE_DATA_LENGTH;
	else
		uiImageLength = g_stWiMAXImage.uiSize - g_stWiMAXImage.uiOffset;		
		
	uiBufOffset += sizeof(WIMAX_MESSAGE_HEADER);
	pImageDataPayload = (PIMAGE_DATA_PAYLOAD)(pTxPkt + uiBufOffset);
	pImageDataPayload->uiOffset = be32_to_cpu( g_stWiMAXImage.uiOffset);
	pImageDataPayload->uiLength = be32_to_cpu(uiImageLength);

	memcpy(pImageDataPayload->ucImageData, 
		      g_stWiMAXImage.pImage + g_stWiMAXImage.uiOffset, 
		      uiImageLength);

	uiPayloadSize = uiImageLength + 8; /* payload = Offset + Length fields + image data */
	pPktHdr->Length = be16_to_cpu(CMD_MSG_TOTAL_LENGTH + uiPayloadSize);
	pWibroMsgHdr->MsgLength = be32_to_cpu(uiPayloadSize);

	dwTxSize = CMD_MSG_TOTAL_LENGTH + uiPayloadSize;

#if 0
	{
				// dump packets
				UINT i, l = dwTxSize;				
				RETAILMSG(1, (TEXT("Send Image Data packet [%d] = "), l));
				for (i = 0; i < l; i++) {
					RETAILMSG(1, (TEXT("%02x"), pTxPkt[i]));
					if (i != (l - 1)) 
						RETAILMSG(1, (_T(",")));
					if ((i != 0) && ((i%32) == 0)) RETAILMSG(1, (_T("\n")));
				}
				RETAILMSG(1, (_T("\n")));
	}			
#endif

	status = sd_send(Adapter, pTxPkt, dwTxSize);

	if (status != STATUS_SUCCESS) {
		/* CRC or data error - the caller is expected to resend
		 * this packet (the original retry path was a goto) */
		DumpDebug(FW_DNLD, "hwSdioWrite : crc error");
		return status;
	}
	g_stWiMAXImage.uiOffset += uiImageLength;

	return status;		
}
Example 20
static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	int len, err;
	char strbuf[HFSPLUS_MAX_STRLEN + 1];
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct hfsplus_readdir_data *rd;
	u16 type;

	if (file->f_pos >= inode->i_size)
		return 0;

	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (err)
		return err;
	hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
	err = hfs_brec_find(&fd, hfs_find_rec_by_key);
	if (err)
		goto out;

	if (ctx->pos == 0) {
		/* This is completely artificial... */
		if (!dir_emit_dot(file, ctx))
			goto out;
		ctx->pos = 1;
	}
	if (ctx->pos == 1) {
		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			fd.entrylength);
		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
			pr_err("bad catalog folder thread\n");
			err = -EIO;
			goto out;
		}
		if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
			pr_err("truncated catalog thread\n");
			err = -EIO;
			goto out;
		}
		if (!dir_emit(ctx, "..", 2,
			    be32_to_cpu(entry.thread.parentID), DT_DIR))
			goto out;
		ctx->pos = 2;
	}
	if (ctx->pos >= inode->i_size)
		goto out;
	err = hfs_brec_goto(&fd, ctx->pos - 1);
	if (err)
		goto out;
	for (;;) {
		if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
			pr_err("walked past end of dir\n");
			err = -EIO;
			goto out;
		}

		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			fd.entrylength);
		type = be16_to_cpu(entry.type);
		len = HFSPLUS_MAX_STRLEN;
		err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
		if (err)
			goto out;
		if (type == HFSPLUS_FOLDER) {
			if (fd.entrylength <
					sizeof(struct hfsplus_cat_folder)) {
				pr_err("small dir entry\n");
				err = -EIO;
				goto out;
			}
			if (HFSPLUS_SB(sb)->hidden_dir &&
			    HFSPLUS_SB(sb)->hidden_dir->i_ino ==
					be32_to_cpu(entry.folder.id))
				goto next;
			if (!dir_emit(ctx, strbuf, len,
				    be32_to_cpu(entry.folder.id), DT_DIR))
				break;
		} else if (type == HFSPLUS_FILE) {
			if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
				pr_err("small file entry\n");
				err = -EIO;
				goto out;
			}
			if (!dir_emit(ctx, strbuf, len,
				    be32_to_cpu(entry.file.id), DT_REG))
				break;
		} else {
			pr_err("bad catalog entry type\n");
			err = -EIO;
			goto out;
		}
next:
		ctx->pos++;
		if (ctx->pos >= inode->i_size)
			goto out;
		err = hfs_brec_goto(&fd, 1);
		if (err)
			goto out;
	}
	rd = file->private_data;
	if (!rd) {
		rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
		if (!rd) {
			err = -ENOMEM;
			goto out;
		}
		file->private_data = rd;
		rd->file = file;
		list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
	}
	memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
out:
	hfs_find_exit(&fd);
	return err;
}
Example 21
/*
 * Readdir for block directories.
 */
int						/* error */
xfs_dir2_block_getdents(
	xfs_inode_t		*dp,		/* incore inode */
	void			*dirent,
	xfs_off_t		*offset,
	filldir_t		filldir)
{
	xfs_dir2_block_t	*block;		/* directory block structure */
	xfs_dabuf_t		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	char			*endptr;	/* end of the data entries */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* filesystem mount point */
	char			*ptr;		/* current data entry */
	int			wantoff;	/* starting block offset */
	xfs_off_t		cook;

	mp = dp->i_mount;
	/*
	 * If the block number in the offset is out of range, we're done.
	 */
	if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk) {
		return 0;
	}
	/*
	 * Can't read the block, give up, else get dabuf in bp.
	 */
	error = xfs_da_read_buf(NULL, dp, mp->m_dirdatablk, -1,
				&bp, XFS_DATA_FORK);
	if (error)
		return error;

	ASSERT(bp != NULL);
	/*
	 * Extract the byte offset we start at from the seek pointer.
	 * We'll skip entries before this.
	 */
	wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
	block = bp->data;
	xfs_dir2_data_check(dp, bp);
	/*
	 * Set up values for the loop.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	ptr = (char *)block->u;
	endptr = (char *)xfs_dir2_block_leaf_p(btp);

	/*
	 * Loop over the data portion of the block.
	 * Each object is a real entry (dep) or an unused one (dup).
	 */
	while (ptr < endptr) {
		dup = (xfs_dir2_data_unused_t *)ptr;
		/*
		 * Unused, skip it.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			ptr += be16_to_cpu(dup->length);
			continue;
		}

		dep = (xfs_dir2_data_entry_t *)ptr;

		/*
		 * Bump pointer for the next iteration.
		 */
		ptr += xfs_dir2_data_entsize(dep->namelen);
		/*
		 * The entry is before the desired starting point, skip it.
		 */
		if ((char *)dep - (char *)block < wantoff)
			continue;

		cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
					    (char *)dep - (char *)block);

		/*
		 * If it didn't fit, set the final offset to here & return.
		 */
		if (filldir(dirent, (char *)dep->name, dep->namelen,
			    cook & 0x7fffffff, be64_to_cpu(dep->inumber),
			    DT_UNKNOWN)) {
			*offset = cook & 0x7fffffff;
			xfs_da_brelse(NULL, bp);
			return 0;
		}
	}

	/*
	 * Reached the end of the block.
	 * Set the offset to a non-existent block 1 and return.
	 */
	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
			0x7fffffff;
	xfs_da_brelse(NULL, bp);
	return 0;
}
Example 22
/* Find the entry inside dir named dentry->d_name */
static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = NULL;
	struct hfs_find_data fd;
	struct super_block *sb;
	hfsplus_cat_entry entry;
	int err;
	u32 cnid, linkid = 0;
	u16 type;

	sb = dir->i_sb;

	dentry->d_fsdata = NULL;
	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (err)
		return ERR_PTR(err);
	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
again:
	err = hfs_brec_read(&fd, &entry, sizeof(entry));
	if (err) {
		if (err == -ENOENT) {
			hfs_find_exit(&fd);
			/* No such entry */
			inode = NULL;
			goto out;
		}
		goto fail;
	}
	type = be16_to_cpu(entry.type);
	if (type == HFSPLUS_FOLDER) {
		if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
			err = -EIO;
			goto fail;
		}
		cnid = be32_to_cpu(entry.folder.id);
		dentry->d_fsdata = (void *)(unsigned long)cnid;
	} else if (type == HFSPLUS_FILE) {
		if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
			err = -EIO;
			goto fail;
		}
		cnid = be32_to_cpu(entry.file.id);
		if (entry.file.user_info.fdType ==
				cpu_to_be32(HFSP_HARDLINK_TYPE) &&
				entry.file.user_info.fdCreator ==
				cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
				HFSPLUS_SB(sb)->hidden_dir &&
				(entry.file.create_date ==
					HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
						create_date ||
				entry.file.create_date ==
					HFSPLUS_I(sb->s_root->d_inode)->
						create_date)) {
			struct qstr str;
			char name[32];

			if (dentry->d_fsdata) {
				/*
				 * We found a link pointing to another link,
				 * so ignore it and treat it as regular file.
				 */
				cnid = (unsigned long)dentry->d_fsdata;
				linkid = 0;
			} else {
				dentry->d_fsdata = (void *)(unsigned long)cnid;
				linkid =
					be32_to_cpu(entry.file.permissions.dev);
				str.len = sprintf(name, "iNode%d", linkid);
				str.name = name;
				hfsplus_cat_build_key(sb, fd.search_key,
					HFSPLUS_SB(sb)->hidden_dir->i_ino,
					&str);
				goto again;
			}
		} else if (!dentry->d_fsdata)
			dentry->d_fsdata = (void *)(unsigned long)cnid;
	} else {
		pr_err("invalid catalog entry type in lookup\n");
		err = -EIO;
		goto fail;
	}
	hfs_find_exit(&fd);
	inode = hfsplus_iget(dir->i_sb, cnid);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (S_ISREG(inode->i_mode))
		HFSPLUS_I(inode)->linkid = linkid;
out:
	d_add(dentry, inode);
	return NULL;
fail:
	hfs_find_exit(&fd);
	return ERR_PTR(err);
}
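
The hardlink branch above redirects the lookup: when the catalog file carries the hardlink type/creator pair, the real data lives under the hidden directory as "iNode<dev>", so the search key is rebuilt from that synthesized name and the lookup restarts. A minimal sketch of just the name synthesis (plain C; the HFS+ key helpers are omitted):

#include <stdio.h>

/* Build the hidden-directory member name for a hardlink id, mirroring
 * the sprintf(name, "iNode%d", linkid) step in the lookup above. */
static int hardlink_name(char *buf, size_t len, unsigned int linkid)
{
	return snprintf(buf, len, "iNode%u", linkid);
}

int main(void)
{
	char name[32];
	int n = hardlink_name(name, sizeof(name), 42);

	printf("%s (len %d)\n", name, n);	/* prints "iNode42 (len 7)" */
	return 0;
}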
Example no. 23
/*
 * Convert a V2 leaf directory to a V2 block directory if possible.
 */
int						/* error */
xfs_dir2_leaf_to_block(
	xfs_da_args_t		*args,		/* operation arguments */
	xfs_dabuf_t		*lbp,		/* leaf buffer */
	xfs_dabuf_t		*dbp)		/* data buffer */
{
	__be16			*bestsp;	/* leaf bests table */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_inode_t		*dp;		/* incore directory inode */
	xfs_dir2_data_unused_t	*dup;		/* unused data entry */
	int			error;		/* error return value */
	int			from;		/* leaf from index */
	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
	xfs_mount_t		*mp;		/* file system mount point */
	int			needlog;	/* need to log data header */
	int			needscan;	/* need to scan for bestfree */
	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
	int			size;		/* bytes used */
	__be16			*tagp;		/* end of entry (tag) */
	int			to;		/* block/leaf to index */
	xfs_trans_t		*tp;		/* transaction pointer */

	trace_xfs_dir2_leaf_to_block(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	leaf = lbp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
	/*
	 * If there are data blocks other than the first one, take this
	 * opportunity to remove trailing empty data blocks that may have
	 * been left behind during no-space-reservation operations.
	 * These will show up in the leaf bests table.
	 */
	while (dp->i_d.di_size > mp->m_dirblksize) {
		bestsp = xfs_dir2_leaf_bests_p(ltp);
		if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
		    mp->m_dirblksize - (uint)sizeof(block->hdr)) {
			if ((error =
			    xfs_dir2_leaf_trim_data(args, lbp,
				    (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
				goto out;
		} else {
			error = 0;
			goto out;
		}
	}
	/*
	 * Read the data block if we don't already have it, give up if it fails.
	 */
	if (dbp == NULL &&
	    (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp,
		    XFS_DATA_FORK))) {
		goto out;
	}
	block = dbp->data;
	ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC);
	/*
	 * Size of the "leaf" area in the block.
	 */
	size = (uint)sizeof(block->tail) +
	       (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
	/*
	 * Look at the last data entry.
	 */
	tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1;
	dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
	/*
	 * If it's not free or is too short we can't do it.
	 */
	if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG ||
	    be16_to_cpu(dup->length) < size) {
		error = 0;
		goto out;
	}
	/*
	 * Start converting it to block form.
	 */
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	needlog = 1;
	needscan = 0;
	/*
	 * Use up the space at the end of the block (blp/btp).
	 */
	xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size,
		&needlog, &needscan);
	/*
	 * Initialize the block tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
	btp->stale = 0;
	xfs_dir2_block_log_tail(tp, dbp);
	/*
	 * Initialize the block leaf area.  We compact out stale entries.
	 */
	lep = xfs_dir2_block_leaf_p(btp);
	for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
		if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
			continue;
		lep[to++] = leaf->ents[from];
	}
	ASSERT(to == be32_to_cpu(btp->count));
	xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1);
	/*
	 * Scan the bestfree if we need it and log the data block header.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, dbp);
	/*
	 * Pitch the old leaf block.
	 */
	error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp);
	lbp = NULL;
	if (error) {
		goto out;
	}
	/*
	 * Now see if the resulting block can be shrunken to shortform.
	 */
	if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
	    XFS_IFORK_DSIZE(dp)) {
		error = 0;
		goto out;
	}
	return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
out:
	if (lbp)
		xfs_da_buf_done(lbp);
	if (dbp)
		xfs_da_buf_done(dbp);
	return error;
}
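
Whether the conversion can happen at all is decided by plain arithmetic above: the compacted leaf needs the block tail plus one leaf entry per live (non-stale) record, and that much space must be free at the end of the data block. A worked version of that check, with hypothetical sizes standing in for the on-disk structures:

#include <stdio.h>

#define TAIL_SIZE	8	/* hypothetical sizeof(block->tail) */
#define LEP_SIZE	8	/* hypothetical sizeof(xfs_dir2_leaf_entry_t) */

int main(void)
{
	unsigned count = 100, stale = 12;	/* leaf header fields */
	unsigned freelen = 1024;		/* trailing unused space */
	unsigned size = TAIL_SIZE + LEP_SIZE * (count - stale);

	/* 8 + 8 * 88 = 712 <= 1024, so this block could be converted */
	printf("need %u, have %u: %s\n", size, freelen,
	       size <= freelen ? "convert" : "stay in leaf form");
	return 0;
}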
Example no. 24
File: vpc.c Project: binape/qemu
static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVVPCState *s = bs->opaque;
    int i;
    VHDFooter *footer;
    VHDDynDiskHeader *dyndisk_header;
    uint8_t buf[HEADER_SIZE];
    uint32_t checksum;
    uint64_t computed_size;
    uint64_t pagetable_size;
    int disk_type = VHD_DYNAMIC;
    int ret;

    ret = bdrv_pread(bs->file->bs, 0, s->footer_buf, HEADER_SIZE);
    if (ret < 0) {
        goto fail;
    }

    footer = (VHDFooter *) s->footer_buf;
    if (strncmp(footer->creator, "conectix", 8)) {
        int64_t offset = bdrv_getlength(bs->file->bs);
        if (offset < 0) {
            ret = offset;
            goto fail;
        } else if (offset < HEADER_SIZE) {
            ret = -EINVAL;
            goto fail;
        }

        /* If a fixed disk, the footer is found only at the end of the file */
        ret = bdrv_pread(bs->file->bs, offset-HEADER_SIZE, s->footer_buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }
        if (strncmp(footer->creator, "conectix", 8)) {
            error_setg(errp, "invalid VPC image");
            ret = -EINVAL;
            goto fail;
        }
        disk_type = VHD_FIXED;
    }

    checksum = be32_to_cpu(footer->checksum);
    footer->checksum = 0;
    if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum)
        fprintf(stderr, "block-vpc: The header checksum of '%s' is "
            "incorrect.\n", bs->filename);

    /* Write the checksum back into the footer so it isn't left zeroed. */
    footer->checksum = cpu_to_be32(checksum);

    /* The visible size of an image in Virtual PC depends on the geometry
     * rather than on the size stored in the footer (the size in the footer
     * is usually too large). */
    bs->total_sectors = (int64_t)
        be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;

    /* Images that have exactly the maximum geometry are probably bigger and
     * would be truncated if we adhered to the geometry for them. Rely on
     * footer->current_size for them. */
    if (bs->total_sectors == VHD_MAX_GEOMETRY) {
        bs->total_sectors = be64_to_cpu(footer->current_size) /
                            BDRV_SECTOR_SIZE;
    }

    /* Allow a maximum disk size of approximately 2 TB */
    if (bs->total_sectors >= VHD_MAX_SECTORS) {
        ret = -EFBIG;
        goto fail;
    }

    if (disk_type == VHD_DYNAMIC) {
        ret = bdrv_pread(bs->file->bs, be64_to_cpu(footer->data_offset), buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }

        dyndisk_header = (VHDDynDiskHeader *) buf;

        if (strncmp(dyndisk_header->magic, "cxsparse", 8)) {
            ret = -EINVAL;
            goto fail;
        }

        s->block_size = be32_to_cpu(dyndisk_header->block_size);
        if (!is_power_of_2(s->block_size) || s->block_size < BDRV_SECTOR_SIZE) {
            error_setg(errp, "Invalid block size %" PRIu32, s->block_size);
            ret = -EINVAL;
            goto fail;
        }
        s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;

        s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);

        if ((bs->total_sectors * 512) / s->block_size > 0xffffffffU) {
            ret = -EINVAL;
            goto fail;
        }
        if (s->max_table_entries > (VHD_MAX_SECTORS * 512) / s->block_size) {
            ret = -EINVAL;
            goto fail;
        }

        computed_size = (uint64_t) s->max_table_entries * s->block_size;
        if (computed_size < bs->total_sectors * 512) {
            ret = -EINVAL;
            goto fail;
        }

        if (s->max_table_entries > SIZE_MAX / 4 ||
            s->max_table_entries > (int) INT_MAX / 4) {
            error_setg(errp, "Max Table Entries too large (%" PRId32 ")",
                        s->max_table_entries);
            ret = -EINVAL;
            goto fail;
        }

        pagetable_size = (uint64_t) s->max_table_entries * 4;

        s->pagetable = qemu_try_blockalign(bs->file->bs, pagetable_size);
        if (s->pagetable == NULL) {
            ret = -ENOMEM;
            goto fail;
        }

        s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);

        ret = bdrv_pread(bs->file->bs, s->bat_offset, s->pagetable,
                         pagetable_size);
        if (ret < 0) {
            goto fail;
        }

        s->free_data_block_offset =
            ROUND_UP(s->bat_offset + pagetable_size, 512);

        for (i = 0; i < s->max_table_entries; i++) {
            be32_to_cpus(&s->pagetable[i]);
            if (s->pagetable[i] != 0xFFFFFFFF) {
                int64_t next = (512 * (int64_t) s->pagetable[i]) +
                    s->bitmap_size + s->block_size;

                if (next > s->free_data_block_offset) {
                    s->free_data_block_offset = next;
                }
            }
        }

        if (s->free_data_block_offset > bdrv_getlength(bs->file->bs)) {
            error_setg(errp, "block-vpc: free_data_block_offset points after "
                             "the end of file. The image has been truncated.");
            ret = -EINVAL;
            goto fail;
        }

        s->last_bitmap_offset = (int64_t) -1;

#ifdef CACHE
        s->pageentry_u8 = g_malloc(512);
        s->pageentry_u32 = s->pageentry_u8;
        s->pageentry_u16 = s->pageentry_u8;
        s->last_pagetable = -1;
#endif
    }

    qemu_co_mutex_init(&s->lock);

    /* Disable migration when VHD images are used */
    error_setg(&s->migration_blocker, "The vpc format used by node '%s' "
               "does not support live migration",
               bdrv_get_device_or_node_name(bs));
    migrate_add_blocker(s->migration_blocker);

    return 0;

fail:
    qemu_vfree(s->pagetable);
#ifdef CACHE
    g_free(s->pageentry_u8);
#endif
    return ret;
}
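
The checksum dance above (save the field, zero it, recompute, compare, restore) follows the usual VHD footer convention: the stored checksum is the one's complement of the byte sum of the footer computed with the checksum field zeroed. A minimal sketch of that convention; the real vpc_checksum lives elsewhere in vpc.c, so treat this as an assumption rather than its definition:

#include <stdint.h>
#include <stddef.h>

/* Assumed VHD footer checksum: one's complement of the byte sum,
 * computed while the checksum field itself is zeroed (as vpc_open
 * arranges before comparing). */
static uint32_t vhd_checksum(const uint8_t *buf, size_t size)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		sum += buf[i];
	return ~sum;
}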
Example no. 25
static int ar7646_read_data(struct device *dev, u16 *x, u16 *y, u16 *z1, u16 *z2)
{
	struct spi_device	*spi = to_spi_device(dev);
	struct ser_req		*req = kzalloc(sizeof(*req), GFP_KERNEL);
	u8			tx_buf[6][2], c = 0;
	u16			rx_buf[6];
	int			status;

	if (!req)
		return -ENOMEM;

	spi_message_init(&req->msg);

	/* Transfer 0: initial command byte 0xd7 (all command bytes are
	 * shifted left one bit on the wire). */
	tx_buf[c][0] = 0x01;
	tx_buf[c][1] = (u8)((u16)0xd7 << 1);
	req->xfer[c].tx_buf = tx_buf[c];
	req->xfer[c].len = SET_TX_RX_LEN(2, 2);
	req->xfer[c].rx_buf = &rx_buf[c];
	spi_message_add_tail(&req->xfer[c], &req->msg);
	c++;

	/* Transfer 1: sample X with the converter fully powered. */
	tx_buf[c][0] = 0x01;
	tx_buf[c][1] = ((READ_12BIT_VREF(d_x) | AR_PD10_ALL_ON) << 1);
	req->xfer[c].tx_buf = tx_buf[c];
	req->xfer[c].len = SET_TX_RX_LEN(2, 2);
	req->xfer[c].rx_buf = &rx_buf[c];
	spi_message_add_tail(&req->xfer[c], &req->msg);
	c++;

	/* Transfer 2: sample Y. */
	tx_buf[c][0] = 0x01;
	tx_buf[c][1] = ((READ_12BIT_VREF(d_y) | AR_PD10_ALL_ON) << 1);
	req->xfer[c].tx_buf = tx_buf[c];
	req->xfer[c].len = SET_TX_RX_LEN(2, 2);
	req->xfer[c].rx_buf = &rx_buf[c];
	spi_message_add_tail(&req->xfer[c], &req->msg);
	c++;

	/* Transfer 3: sample Z1. */
	tx_buf[c][0] = 0x01;
	tx_buf[c][1] = ((READ_12BIT_VREF(d_z1) | AR_PD10_ALL_ON) << 1);
	req->xfer[c].tx_buf = tx_buf[c];
	req->xfer[c].len = SET_TX_RX_LEN(2, 2);
	req->xfer[c].rx_buf = &rx_buf[c];
	spi_message_add_tail(&req->xfer[c], &req->msg);
	c++;

	/* Transfer 4: sample Z2. */
	tx_buf[c][0] = 0x01;
	tx_buf[c][1] = ((READ_12BIT_VREF(d_z2) | AR_PD10_ALL_ON) << 1);
	req->xfer[c].tx_buf = tx_buf[c];
	req->xfer[c].len = SET_TX_RX_LEN(2, 2);
	req->xfer[c].rx_buf = &rx_buf[c];
	spi_message_add_tail(&req->xfer[c], &req->msg);
	c++;

	/* Transfer 5: final command byte 0xe0, ending the sequence. */
	tx_buf[c][0] = 0x01;
	tx_buf[c][1] = 0xe0 << 1;
	req->xfer[c].tx_buf = tx_buf[c];
	req->xfer[c].len = SET_TX_RX_LEN(2, 2);
	req->xfer[c].rx_buf = &rx_buf[c];
	spi_message_add_tail(&req->xfer[c], &req->msg);

	/* Keep the touch interrupt quiet while the transaction runs. */
	disable_irq(spi->irq);
	status = spi_sync(spi, &req->msg);
	enable_irq(spi->irq);

	/* on-wire is a must-ignore bit, a BE12 value, then padding */
	rx_buf[1] = be16_to_cpu(rx_buf[1]);
	*x = (rx_buf[1] >> 4) & 0x0fff;

	rx_buf[2] = be16_to_cpu(rx_buf[2]);
	*y = (rx_buf[2] >> 4) & 0x0fff;

	rx_buf[3] = be16_to_cpu(rx_buf[3]);
	*z1 = (rx_buf[3] >> 4) & 0x0fff;

	rx_buf[4] = be16_to_cpu(rx_buf[4]);
	*z2 = (rx_buf[4] >> 4) & 0x0fff;

	kfree(req);
	return status;
}
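
Decoding above is mechanical once the wire format in the comment is known: byte-swap the big-endian word, shift out the four padding bits, and mask to 12 bits. A worked value, with a hypothetical helper standing in for be16_to_cpu:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for be16_to_cpu on a little-endian host. */
static uint16_t demo_be16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t raw = 0xf07f;	/* wire bytes 0x7f 0xf0, as stored on LE */
	uint16_t val = (demo_be16(raw) >> 4) & 0x0fff;

	printf("0x%03x\n", val);	/* 0x7ff: a mid-scale 12-bit sample */
	return 0;
}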
Example no. 26
File: recv.c Project: 020gzh/linux
/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, who only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)conn->c_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		rds_stats_inc(s_recv_ping);
		rds_send_pong(conn, inc->i_hdr.h_sport);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
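
The drop rule in the middle of the function is worth isolating: a fragment is discarded only when its sequence number is behind the next expected one *and* it is flagged as a retransmission, so ordinary forward skips (messages dropped before they hit the wire) still advance the window. As a minimal sketch with a hypothetical flag value:

#include <stdint.h>
#include <stdbool.h>

#define DEMO_FLAG_RETRANSMITTED	0x04	/* hypothetical stand-in */

/* Mirrors the check in rds_recv_incoming: old and retransmitted
 * fragments are dropped, anything else advances c_next_rx_seq. */
static bool demo_should_drop(uint64_t seq, uint64_t next_rx_seq,
			     uint8_t flags)
{
	return seq < next_rx_seq && (flags & DEMO_FLAG_RETRANSMITTED);
}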
Example no. 27
/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
int
xfs_attr3_leaf_list_int(
    struct xfs_buf			*bp,
    struct xfs_attr_list_context	*context)
{
    struct attrlist_cursor_kern	*cursor;
    struct xfs_attr_leafblock	*leaf;
    struct xfs_attr3_icleaf_hdr	ichdr;
    struct xfs_attr_leaf_entry	*entries;
    struct xfs_attr_leaf_entry	*entry;
    int				retval;
    int				i;

    trace_xfs_attr_list_leaf(context);

    leaf = bp->b_addr;
    xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
    entries = xfs_attr3_leaf_entryp(leaf);

    cursor = context->cursor;
    cursor->initted = 1;

    /*
     * Re-find our place in the leaf block if this is a new syscall.
     */
    if (context->resynch) {
        entry = &entries[0];
        for (i = 0; i < ichdr.count; entry++, i++) {
            if (be32_to_cpu(entry->hashval) == cursor->hashval) {
                if (cursor->offset == context->dupcnt) {
                    context->dupcnt = 0;
                    break;
                }
                context->dupcnt++;
            } else if (be32_to_cpu(entry->hashval) >
                       cursor->hashval) {
                context->dupcnt = 0;
                break;
            }
        }
        if (i == ichdr.count) {
            trace_xfs_attr_list_notfound(context);
            return 0;
        }
    } else {
        entry = &entries[0];
        i = 0;
    }
    context->resynch = 0;

    /*
     * We have found our place, start copying out the new attributes.
     */
    retval = 0;
    for (; i < ichdr.count; entry++, i++) {
        if (be32_to_cpu(entry->hashval) != cursor->hashval) {
            cursor->hashval = be32_to_cpu(entry->hashval);
            cursor->offset = 0;
        }

        if (entry->flags & XFS_ATTR_INCOMPLETE)
            continue;		/* skip incomplete entries */

        if (entry->flags & XFS_ATTR_LOCAL) {
            xfs_attr_leaf_name_local_t *name_loc =
                xfs_attr3_leaf_name_local(leaf, i);

            retval = context->put_listent(context,
                                          entry->flags,
                                          name_loc->nameval,
                                          (int)name_loc->namelen,
                                          be16_to_cpu(name_loc->valuelen),
                                          &name_loc->nameval[name_loc->namelen]);
            if (retval)
                return retval;
        } else {
            xfs_attr_leaf_name_remote_t *name_rmt =
                xfs_attr3_leaf_name_remote(leaf, i);

            int valuelen = be32_to_cpu(name_rmt->valuelen);

            if (context->put_value) {
                xfs_da_args_t args;

                memset((char *)&args, 0, sizeof(args));
                args.dp = context->dp;
                args.whichfork = XFS_ATTR_FORK;
                args.valuelen = valuelen;
                args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
                args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
                args.rmtblkcnt = xfs_attr3_rmt_blocks(
                                     args.dp->i_mount, valuelen);
                retval = xfs_attr_rmtval_get(&args);
                if (retval)
                    return retval;
                retval = context->put_listent(context,
                                              entry->flags,
                                              name_rmt->name,
                                              (int)name_rmt->namelen,
                                              valuelen,
                                              args.value);
                kmem_free(args.value);
            } else {
                retval = context->put_listent(context,
                                              entry->flags,
                                              name_rmt->name,
                                              (int)name_rmt->namelen,
                                              valuelen,
                                              NULL);
            }
            if (retval)
                return retval;
        }
        if (context->seen_enough)
            break;
        cursor->offset++;
    }
    trace_xfs_attr_list_leaf_end(context);
    return retval;
}
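
The resynch logic above works because the cursor is a (hashval, offset) pair: hash values can collide, so offset counts how many entries with that hash were already returned. A minimal sketch of resuming a hash-sorted scan with such a cursor, using hypothetical types:

struct demo_cursor {		/* hypothetical (hash, dup-offset) cursor */
	unsigned int hashval;
	int offset;
};

/* Return the index of the first entry not yet returned: skip
 * cursor->offset duplicates of hashval, or stop at the first larger
 * hash if the remembered one has vanished. Returns n when done. */
static int demo_resynch(const unsigned int *hashes, int n,
			const struct demo_cursor *c)
{
	int i, dup = 0;

	for (i = 0; i < n; i++) {
		if (hashes[i] == c->hashval) {
			if (dup++ == c->offset)
				return i;
		} else if (hashes[i] > c->hashval) {
			return i;
		}
	}
	return n;
}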
Example no. 28
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}
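
/*
 * The loop above is the standard partial-send pattern: each
 * sendpage/sendmsg call may accept fewer bytes than requested, so the
 * segment tracks how much was copied and the loop re-issues the rest.
 * A minimal user-space sketch of the same pattern over a plain socket
 * (hypothetical helper, not part of this driver):
 *
 *	static int send_all(int fd, const char *buf, size_t len)
 *	{
 *		size_t off = 0;
 *
 *		while (off < len) {
 *			ssize_t r = send(fd, buf + off, len - off, 0);
 *
 *			if (r < 0) {
 *				if (errno == EINTR)
 *					continue;
 *				return -errno;
 *			}
 *			off += (size_t)r;
 *		}
 *		return 0;
 *	}
 */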

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}

static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	int rc;

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0)
			return -EAGAIN;
		if (rc < 0)
			return rc;
	}

	return 0;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}

static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}

static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->tx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
		goto free_conn;

	tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->rx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
		goto free_tx_tfm;
	tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_tfm:
	crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->lock);
	tcp_sw_conn->sock = NULL;
	spin_unlock_bh(&session->lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	if (tcp_sw_conn->tx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
	if (tcp_sw_conn->rx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->rx_hash.tfm);

	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;
	/*
	 * Make sure our recv side is stopped.
	 * Older tools called conn stop before ep_disconnect
	 * so IO could still be coming in.
	 */
	write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
	write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);

	if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
		sock->sk->sk_err = EIO;
		wake_up_interruptible(sock->sk->sk_sleep);
	}

	iscsi_conn_stop(cls_conn, flag);
	iscsi_sw_tcp_release_conn(conn);
}

static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
				 char *buf, int *port,
				 int (*getname)(struct socket *,
						struct sockaddr *,
						int *addrlen))
{
	struct sockaddr_storage *addr;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int rc = 0, len;

	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (getname(sock, (struct sockaddr *) addr, &len)) {
		rc = -ENODEV;
		goto free_addr;
	}

	switch (addr->ss_family) {
	case AF_INET:
		sin = (struct sockaddr_in *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
		*port = be16_to_cpu(sin->sin_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, "%pI6", &sin6->sin6_addr);
		*port = be16_to_cpu(sin6->sin6_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	}
free_addr:
	kfree(addr);
	return rc;
}
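
/*
 * The helper above stringifies whichever address family the socket
 * reports, so the bind path below can cache the values for userspace.
 * The same walk in user space, as a hypothetical sketch built on
 * getpeername() and inet_ntop():
 *
 *	struct sockaddr_storage ss;
 *	socklen_t len = sizeof(ss);
 *
 *	if (getpeername(fd, (struct sockaddr *)&ss, &len) == 0) {
 *		if (ss.ss_family == AF_INET) {
 *			struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
 *			inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
 *			port = ntohs(sin->sin_port);
 *		} else if (ss.ss_family == AF_INET6) {
 *			struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&ss;
 *			inet_ntop(AF_INET6, &s6->sin6_addr, buf, sizeof(buf));
 *			port = ntohs(s6->sin6_port);
 *		}
 *	}
 */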

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_host *ihost = shost_priv(shost);
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}
	/*
	 * copy these values now because if we drop the session
	 * userspace may still want to query the values since we will
	 * be using them for the reconnect
	 */
	err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
				    &conn->portal_port, kernel_getpeername);
	if (err)
		goto free_socket;

	err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
				    &ihost->local_port, kernel_getsockname);
	if (err)
		goto free_socket;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_tcp_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (iscsi_tcp_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	int len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%hu\n", conn->portal_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%s\n", conn->portal_address);
		spin_unlock_bh(&conn->session->lock);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
	set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
	return 0;
}
Example no. 29
static int sgi_get_bootpartition(struct fdisk_context *cxt)
{
	struct sgi_disklabel *sgilabel = self_disklabel(cxt);
	return be16_to_cpu(sgilabel->root_part_num);
}
Example no. 30
File: brec.c Project: hajuuk/R7000
int hfs_brec_insert(hfsplus_handle_t *hfsplus_handle, struct hfs_find_data *fd, void *entry, int entry_len)
{
	struct hfs_btree *tree;
	struct hfs_bnode *node, *new_node;
	int size, key_len, rec;
	int data_off, end_off;
	int idx_rec_off, data_rec_off, end_rec_off;
	__be32 cnid;

	tree = fd->tree;
	if (!fd->bnode) {
		if (!tree->root)
			hfs_btree_inc_height(hfsplus_handle, tree);
		fd->bnode = hfs_bnode_find(hfsplus_handle, tree, tree->leaf_head);
		if (IS_ERR(fd->bnode))
			return PTR_ERR(fd->bnode);
		fd->record = -1;
	}
	new_node = NULL;
	key_len = be16_to_cpu(fd->search_key->key_len) + 2;
again:
	/* new record idx and complete record size */
	rec = fd->record + 1;
	size = key_len + entry_len;

	node = fd->bnode;
	hfs_bnode_dump(node);
	/* get last offset */
	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
	end_off = hfs_bnode_read_u16(node, end_rec_off);
	end_rec_off -= 2;
	dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off);
	if (size > end_rec_off - end_off) {
		if (new_node)
			panic("not enough room!\n");
		new_node = hfs_bnode_split(hfsplus_handle, fd);
		if (IS_ERR(new_node))
			return PTR_ERR(new_node);
		goto again;
	}
	if (node->type == HFS_NODE_LEAF) {
		tree->leaf_count++;
		if (hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, tree->inode))
			return -1;
	}
	node->num_recs++;
	/* write new last offset */
	hfs_bnode_write_u16(hfsplus_handle, node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
	hfs_bnode_write_u16(hfsplus_handle, node, end_rec_off, end_off + size);
	data_off = end_off;
	data_rec_off = end_rec_off + 2;
	idx_rec_off = tree->node_size - (rec + 1) * 2;
	if (idx_rec_off == data_rec_off)
		goto skip;
	/* move all following entries */
	do {
		data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
		hfs_bnode_write_u16(hfsplus_handle, node, data_rec_off, data_off + size);
		data_rec_off += 2;
	} while (data_rec_off < idx_rec_off);

	/* move data away */
	hfs_bnode_move(hfsplus_handle, node, data_off + size, data_off,
		       end_off - data_off);

skip:
	hfs_bnode_write(hfsplus_handle, node, fd->search_key, data_off, key_len);
	hfs_bnode_write(hfsplus_handle, node, entry, data_off + key_len, entry_len);
	hfs_bnode_dump(node);

	if (new_node) {
		/* update parent key if we inserted a key
		 * at the start of the first node
		 */
		if (!rec && new_node != node)
			hfs_brec_update_parent(hfsplus_handle, fd);

		hfs_bnode_put(hfsplus_handle, fd->bnode);
		if (!new_node->parent) {
			hfs_btree_inc_height(hfsplus_handle, tree);
			new_node->parent = tree->root;
		}
		fd->bnode = hfs_bnode_find(hfsplus_handle, tree, new_node->parent);

		/* create index data entry */
		cnid = cpu_to_be32(new_node->this);
		entry = &cnid;
		entry_len = sizeof(cnid);

		/* get index key */
		hfs_bnode_read_key(new_node, fd->search_key, 14);
		__hfs_brec_find(fd->bnode, fd);

		hfs_bnode_put(hfsplus_handle, new_node);
		new_node = NULL;

		if (tree->attributes & HFS_TREE_VARIDXKEYS)
			key_len = be16_to_cpu(fd->search_key->key_len) + 2;
		else {
			fd->search_key->key_len = cpu_to_be16(tree->max_key_len);
			key_len = tree->max_key_len + 2;
		}
		goto again;
	}

	if (!rec)
		hfs_brec_update_parent(hfsplus_handle, fd);

	return 0;
}
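
The offset arithmetic above reflects the b-tree node layout: record data grows forward from the node header while a table of 16-bit record offsets grows backward from the end of the node, with one extra slot marking the free-space boundary. A worked instance of the two index computations, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	int node_size = 512;	/* hypothetical tree->node_size */
	int num_recs = 3;	/* records already in the node */
	int rec = 1;		/* slot the new record will occupy */

	/* Same expressions as hfs_brec_insert above. */
	int end_rec_off = node_size - (num_recs + 1) * 2;  /* 504 */
	int idx_rec_off = node_size - (rec + 1) * 2;	   /* 508 */

	printf("free-space offset slot at %d, new record's slot at %d\n",
	       end_rec_off, idx_rec_off);
	return 0;
}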