Example #1
File: t4vf_hw.c Project: BozkurTR/kernel
/**
 *	t4vf_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@viid: Virtual Interface of RSS Table Slice
 *	@start: starting entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "Response Queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range 0..1023.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_WRITE |
				     FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more entries done; advance for the start of the
		 * next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
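A minimal standalone sketch (not part of the driver) of the packing and wrap-around described above: three 10-bit Ingress Queue IDs go into each 32-bit word, and a short @rspq array is replicated across @n entries. The bit positions are an assumption for illustration; in the real command they come from the FW_RSS_IND_TBL_CMD_IQ0/IQ1/IQ2 macros.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: IQ0 in bits 20-29, IQ1 in bits 10-19, IQ2 in bits 0-9. */
static uint32_t pack_iq3(uint16_t iq0, uint16_t iq1, uint16_t iq2)
{
	return ((uint32_t)(iq0 & 0x3ff) << 20) |
	       ((uint32_t)(iq1 & 0x3ff) << 10) |
	       (uint32_t)(iq2 & 0x3ff);
}

int main(void)
{
	uint16_t rspq[2] = { 5, 9 };	/* nrspq = 2, replicated over n = 6 */
	uint16_t table[6];
	int i;

	for (i = 0; i < 6; i++)		/* same wrap-around as rsp/rsp_end */
		table[i] = rspq[i % 2];

	for (i = 0; i < 6; i += 3)
		printf("word %d: 0x%08x\n", i / 3,
		       (unsigned)pack_iq3(table[i], table[i + 1], table[i + 2]));
	return 0;
}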
Example #2
File: srq.c Project: 7799/linux
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_create_srq_mbox_in **in, int buf_size,
			     int *inlen)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift;
	int npages;

	err = mlx5_db_alloc(&dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	*srq->db.db = 0;

	if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, (*in)->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

	return 0;

err_in:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(&dev->mdev, &srq->db);
	return err;
}
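The function above uses the kernel's reverse-order goto-unwind idiom: each allocation failure jumps to a label that releases only what was acquired before it. A minimal sketch of the same pattern, with hypothetical alloc/free helpers standing in for mlx5_db_alloc, mlx5_buf_alloc and mlx5_vzalloc:

#include <stdio.h>

/* Hypothetical stand-ins; alloc_c fails to show the unwinding order. */
static int alloc_a(void) { return 0; }
static int alloc_b(void) { return 0; }
static int alloc_c(void) { return -1; }
static void free_a(void) { printf("free a\n"); }
static void free_b(void) { printf("free b\n"); }

static int setup(void)
{
	int err;

	err = alloc_a();
	if (err)
		return err;

	err = alloc_b();
	if (err)
		goto err_a;

	err = alloc_c();
	if (err)
		goto err_b;	/* release b, then a */

	return 0;

err_b:
	free_b();
err_a:
	free_a();
	return err;
}

int main(void) { return setup() ? 1 : 0; }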
Example #3
File: c2_mm.c Project: CSCLOG/beaglebone
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
 			   int page_size, int pbl_depth, u32 length,
 			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;

	if (!va || !length || !addr_list || !pbl_depth)
		return -EINTR;

	/*
	 * Verify PBL depth is within rnic max
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH) {
		return -EINTR;
	}

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * build the WR
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);

	/*
	 * compute how many pbes can fit in the message
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	if (pbl_depth <= pbe_count) {
		flags |= MEM_PBL_COMPLETE;
	}
	wr->flags = cpu_to_be16(flags);
	wr->stag_key = 0;	//stag_key;
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);

	/*
	 * fill out the PBL for this message
	 */
	for (i = 0; i < count; i++) {
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
	}

	/*
	 * reference the request struct
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * send the WR to the adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/*
	 * wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail1;
	}

	/*
	 * process reply
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}
	if ((err = c2_errno(reply))) {
		goto bail2;
	}
	//*p_pb_entries = be32_to_cpu(reply->pbl_depth);
	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);

	/*
	 * if there are still more PBEs we need to send them to
	 * the adapter and wait for a reply on the final one.
	 * reuse vq_req for this purpose.
	 */
	pbl_depth -= count;
	if (pbl_depth) {

		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err) {
			goto bail1;
		}
	}

	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

      bail2:
	vq_repbuf_free(c2dev, reply);
      bail1:
	kfree(wr);
      bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
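A standalone sketch of the chunking arithmetic above (the message and header sizes are invented): how many page-buffer entries (PBEs) fit in the first NSMR_REGISTER message, and how many are left over for the follow-up PBL messages.

#include <stdio.h>

int main(void)
{
	int msg_size = 512;	/* hypothetical c2dev->req_vq.msg_size */
	int hdr_size = 64;	/* hypothetical sizeof(struct c2wr_nsmr_register_req) */
	int pbl_depth = 100;

	int pbe_count = (msg_size - hdr_size) / (int)sizeof(unsigned long long);
	int first = pbl_depth < pbe_count ? pbl_depth : pbe_count;

	printf("pbe_count=%d, first message carries %d, %d left for PBL messages\n",
	       pbe_count, first, pbl_depth - first);
	return 0;
}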
Example #4
static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
				 struct ieee80211_hw *hw,
				 struct ath9k_channel *hchan)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	bool fastcc;
	struct ieee80211_channel *channel = hw->conf.channel;
	struct ath9k_hw_cal_data *caldata = NULL;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;
	int ret;

	if (priv->op_flags & OP_INVALID)
		return -EIO;

	fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);

	ath9k_htc_ps_wakeup(priv);
	htc_stop(priv->htc);
	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
	WMI_CMD(WMI_STOP_RECV_CMDID);

	ath_dbg(common, ATH_DBG_CONFIG,
		"(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
		priv->ah->curchan->channel,
		channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
		fastcc);

	if (!fastcc)
		caldata = &priv->caldata;
	ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
	if (ret) {
		ath_err(common,
			"Unable to reset channel (%u Mhz) reset status %d\n",
			channel->center_freq, ret);
		goto err;
	}

	ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
			       &priv->curtxpow);

	WMI_CMD(WMI_START_RECV_CMDID);
	if (ret)
		goto err;

	ath9k_host_rx_init(priv);

	mode = ath9k_htc_get_curmode(priv, hchan);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
	if (ret)
		goto err;

	WMI_CMD(WMI_ENABLE_INTR_CMDID);
	if (ret)
		goto err;

	htc_start(priv->htc);
err:
	ath9k_htc_ps_restore(priv);
	return ret;
}
Example #5
/* add at the end of the file a new list of snapshots */
static int qcow_write_snapshots(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshot *sn;
    QCowSnapshotHeader h;
    int i, name_size, id_str_size, snapshots_size;
    uint64_t data64;
    uint32_t data32;
    int64_t offset, snapshots_offset;

    /* compute the size of the snapshots */
    offset = 0;
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        offset = align_offset(offset, 8);
        offset += sizeof(h);
        offset += strlen(sn->id_str);
        offset += strlen(sn->name);
    }
    snapshots_size = offset;

    snapshots_offset = qcow2_alloc_clusters(bs, snapshots_size);
    offset = snapshots_offset;
    if (offset < 0) {
        return offset;
    }

    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        memset(&h, 0, sizeof(h));
        h.l1_table_offset = cpu_to_be64(sn->l1_table_offset);
        h.l1_size = cpu_to_be32(sn->l1_size);
        h.vm_state_size = cpu_to_be32(sn->vm_state_size);
        h.date_sec = cpu_to_be32(sn->date_sec);
        h.date_nsec = cpu_to_be32(sn->date_nsec);
        h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec);

        id_str_size = strlen(sn->id_str);
        name_size = strlen(sn->name);
        h.id_str_size = cpu_to_be16(id_str_size);
        h.name_size = cpu_to_be16(name_size);
        offset = align_offset(offset, 8);
        if (bdrv_pwrite_sync(bs->file, offset, &h, sizeof(h)) < 0)
            goto fail;
        offset += sizeof(h);
        if (bdrv_pwrite_sync(bs->file, offset, sn->id_str, id_str_size) < 0)
            goto fail;
        offset += id_str_size;
        if (bdrv_pwrite_sync(bs->file, offset, sn->name, name_size) < 0)
            goto fail;
        offset += name_size;
    }

    /* update the various header fields */
    data64 = cpu_to_be64(snapshots_offset);
    if (bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, snapshots_offset),
                    &data64, sizeof(data64)) < 0)
        goto fail;
    data32 = cpu_to_be32(s->nb_snapshots);
    if (bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, nb_snapshots),
                    &data32, sizeof(data32)) < 0)
        goto fail;

    /* free the old snapshot table */
    qcow2_free_clusters(bs, s->snapshots_offset, s->snapshots_size);
    s->snapshots_offset = snapshots_offset;
    s->snapshots_size = snapshots_size;
    return 0;
 fail:
    return -1;
}
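A standalone sketch of the size computation above; align_offset() here mirrors the round-up-to-multiple helper the qcow2 code uses, and the snapshot ids, names and header size are invented.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int64_t align_offset(int64_t offset, int n)
{
	return (offset + n - 1) & ~(int64_t)(n - 1);
}

int main(void)
{
	const char *ids[]   = { "1", "2" };
	const char *names[] = { "base", "snap-2" };
	size_t header_size  = 40;	/* stand-in for sizeof(QCowSnapshotHeader) */
	int64_t offset = 0;
	int i;

	for (i = 0; i < 2; i++) {
		offset = align_offset(offset, 8);
		offset += header_size + strlen(ids[i]) + strlen(names[i]);
	}
	printf("snapshots_size = %lld\n", (long long)offset);
	return 0;
}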
Example #6
File: vpc.c Project: C2Devel/qemu-kvm
static int vpc_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint8_t buf[1024];
    VHDFooter *footer = (VHDFooter *) buf;
    char *disk_type_param;
    int i;
    uint16_t cyls = 0;
    uint8_t heads = 0;
    uint8_t secs_per_cyl = 0;
    int64_t total_sectors;
    int64_t total_size;
    int disk_type;
    int ret = -EIO;
    Error *local_err = NULL;
    BlockDriverState *bs = NULL;

    /* Read out options */
    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    disk_type_param = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
    if (disk_type_param) {
        if (!strcmp(disk_type_param, "dynamic")) {
            disk_type = VHD_DYNAMIC;
        } else if (!strcmp(disk_type_param, "fixed")) {
            disk_type = VHD_FIXED;
        } else {
            ret = -EINVAL;
            goto out;
        }
    } else {
        disk_type = VHD_DYNAMIC;
    }

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }
    ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                    NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    /*
     * Calculate matching total_size and geometry. Increase the number of
     * sectors requested until we get enough (or fail). This ensures that
     * qemu-img convert doesn't truncate images, but rather rounds up.
     *
     * If the image size can't be represented by a spec conform CHS geometry,
     * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use
     * the image size from the VHD footer to calculate total_sectors.
     */
    total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE);
    for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) {
        calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl);
    }

    if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) {
        total_sectors = total_size / BDRV_SECTOR_SIZE;
        /* Allow a maximum disk size of approximately 2 TB */
        if (total_sectors > VHD_MAX_SECTORS) {
            ret = -EFBIG;
            goto out;
        }
    } else {
        total_sectors = (int64_t)cyls * heads * secs_per_cyl;
        total_size = total_sectors * BDRV_SECTOR_SIZE;
    }

    /* Prepare the Hard Disk Footer */
    memset(buf, 0, 1024);

    memcpy(footer->creator, "conectix", 8);
    /* TODO Check if "qemu" creator_app is ok for VPC */
    memcpy(footer->creator_app, "qemu", 4);
    memcpy(footer->creator_os, "Wi2k", 4);

    footer->features = cpu_to_be32(0x02);
    footer->version = cpu_to_be32(0x00010000);
    if (disk_type == VHD_DYNAMIC) {
        footer->data_offset = cpu_to_be64(HEADER_SIZE);
    } else {
        footer->data_offset = cpu_to_be64(0xFFFFFFFFFFFFFFFFULL);
    }
    footer->timestamp = cpu_to_be32(time(NULL) - VHD_TIMESTAMP_BASE);

    /* Version of Virtual PC 2007 */
    footer->major = cpu_to_be16(0x0005);
    footer->minor = cpu_to_be16(0x0003);
    footer->orig_size = cpu_to_be64(total_size);
    footer->current_size = cpu_to_be64(total_size);
    footer->cyls = cpu_to_be16(cyls);
    footer->heads = heads;
    footer->secs_per_cyl = secs_per_cyl;

    footer->type = cpu_to_be32(disk_type);

#if defined(CONFIG_UUID)
    uuid_generate(footer->uuid);
#endif

    footer->checksum = cpu_to_be32(vpc_checksum(buf, HEADER_SIZE));

    if (disk_type == VHD_DYNAMIC) {
        ret = create_dynamic_disk(bs, buf, total_sectors);
    } else {
        ret = create_fixed_disk(bs, buf, total_size);
    }

out:
    bdrv_unref(bs);
    g_free(disk_type_param);
    return ret;
}
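A sketch of the round-up loop above with a deliberately simplified geometry function (the real calculate_geometry() follows the VHD spec algorithm); the point is the control flow: keep asking for a slightly larger sector count until C*H*S covers the requested size.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, simplified geometry: rounds down, so it can undershoot. */
static void fake_geometry(int64_t sectors, uint16_t *c, uint8_t *h, uint8_t *s)
{
	*h = 16;
	*s = 63;
	*c = (uint16_t)(sectors / (*h * *s));
}

int main(void)
{
	int64_t total_sectors = 100000;
	uint16_t cyls = 0;
	uint8_t heads = 0, secs = 0;
	int i;

	for (i = 0; total_sectors > (int64_t)cyls * heads * secs; i++)
		fake_geometry(total_sectors + i, &cyls, &heads, &secs);

	printf("CHS %u x %u x %u covers %lld sectors after %d tries\n",
	       (unsigned)cyls, (unsigned)heads, (unsigned)secs,
	       (long long)((int64_t)cyls * heads * secs), i);
	return 0;
}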
Example #7
static void
mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
				  int port, uint64_t *data)
{
	mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
				    mv88e6123_61_65_hw_stats, port, data);
}

static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
{
	return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
}

struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
	.tag_protocol		= cpu_to_be16(ETH_P_EDSA),
	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
	.probe			= mv88e6123_61_65_probe,
	.setup			= mv88e6123_61_65_setup,
	.set_addr		= mv88e6xxx_set_addr_indirect,
	.phy_read		= mv88e6123_61_65_phy_read,
	.phy_write		= mv88e6123_61_65_phy_write,
	.poll_link		= mv88e6xxx_poll_link,
	.get_strings		= mv88e6123_61_65_get_strings,
	.get_ethtool_stats	= mv88e6123_61_65_get_ethtool_stats,
	.get_sset_count		= mv88e6123_61_65_get_sset_count,
};

MODULE_ALIAS("platform:mv88e6123");
MODULE_ALIAS("platform:mv88e6161");
MODULE_ALIAS("platform:mv88e6165");
Example #8
/*
 * Add an entry to a block directory.
 */
int						/* error */
xfs_dir2_block_addname(
	xfs_da_args_t		*args)		/* directory op arguments */
{
	xfs_dir2_data_free_t	*bf;		/* bestfree table in block */
	xfs_dir2_block_t	*block;		/* directory block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	int			compact;	/* need to compact leaf ents */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* directory inode */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	int			error;		/* error return value */
	xfs_dir2_data_unused_t	*enddup=NULL;	/* unused at end of data */
	xfs_dahash_t		hash;		/* hash value of found entry */
	int			high;		/* high index for binary srch */
	int			highstale;	/* high stale index */
	int			lfloghigh=0;	/* last final leaf to log */
	int			lfloglow=0;	/* first final leaf to log */
	int			len;		/* length of the new entry */
	int			low;		/* low index for binary srch */
	int			lowstale;	/* low stale index */
	int			mid=0;		/* midpoint for binary srch */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log header */
	int			needscan;	/* need to rescan freespace */
	__be16			*tagp;		/* pointer to tag value */
	xfs_trans_t		*tp;		/* transaction structure */

	xfs_dir2_trace_args("block_addname", args);
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	/*
	 * Read the (one and only) directory block into dabuf bp.
	 */
	if ((error =
	    xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) {
		return error;
	}
	ASSERT(bp != NULL);
	block = bp->data;
	/*
	 * Check the magic number, corrupted if wrong.
	 */
	if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
		XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
				     XFS_ERRLEVEL_LOW, mp, block);
		xfs_da_brelse(tp, bp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	len = xfs_dir2_data_entsize(args->namelen);
	/*
	 * Set up pointers to parts of the block.
	 */
	bf = block->hdr.bestfree;
	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	/*
	 * No stale entries?  Need space for entry and new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then can't do this add without cleaning up:
		 * the space before the first leaf entry needs to be free so it
		 * can be expanded to hold the pointer to the new entry.
		 */
		if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG)
			dup = enddup = NULL;
		/*
		 * Check out the biggest freespace and see if it's the same one.
		 */
		else {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + be16_to_cpu(bf[0].offset));
			if (dup == enddup) {
				/*
				 * It is the biggest freespace, is it too small
				 * to hold the new leaf too?
				 */
				if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) {
					/*
					 * Yes, we use the second-largest
					 * entry instead if it works.
					 */
					if (be16_to_cpu(bf[1].length) >= len)
						dup = (xfs_dir2_data_unused_t *)
						      ((char *)block +
						       be16_to_cpu(bf[1].offset));
					else
						dup = NULL;
				}
			} else {
				/*
				 * Not the same free entry,
				 * just check its length.
				 */
				if (be16_to_cpu(dup->length) < len) {
					dup = NULL;
				}
			}
		}
		compact = 0;
	}
	/*
	 * If there are stale entries we'll use one for the leaf.
	 * Is the biggest entry enough to avoid compaction?
	 */
	else if (be16_to_cpu(bf[0].length) >= len) {
		dup = (xfs_dir2_data_unused_t *)
		      ((char *)block + be16_to_cpu(bf[0].offset));
		compact = 0;
	}
	/*
	 * Will need to compact to make this work.
	 */
	else {
		/*
		 * Tag just before the first leaf entry.
		 */
		tagp = (__be16 *)blp - 1;
		/*
		 * Data object just before the first leaf entry.
		 */
		dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
		/*
		 * If it's not free then the data will go where the
		 * leaf data starts now, if it works at all.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) *
			    (uint)sizeof(*blp) < len)
				dup = NULL;
		} else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len)
			dup = NULL;
		else
			dup = (xfs_dir2_data_unused_t *)blp;
		compact = 1;
	}
	/*
	 * If this isn't a real add, we're done with the buffer.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		xfs_da_brelse(tp, bp);
	/*
	 * If we don't have space for the new entry & leaf ...
	 */
	if (!dup) {
		/*
		 * Not trying to actually do anything, or don't have
		 * a space reservation: return no-space.
		 */
		if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
			return XFS_ERROR(ENOSPC);
		/*
		 * Convert to the next larger format.
		 * Then add the new entry in that format.
		 */
		error = xfs_dir2_block_to_leaf(args, bp);
		xfs_da_buf_done(bp);
		if (error)
			return error;
		return xfs_dir2_leaf_addname(args);
	}
	/*
	 * Just checking, and it would work, so say so.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK)
		return 0;
	needlog = needscan = 0;
	/*
	 * If need to compact the leaf entries, do it now.
	 * Leave the highest-numbered stale entry stale.
	 * XXX should be the one closest to mid but mid is not yet computed.
	 */
	if (compact) {
		int	fromidx;		/* source leaf index */
		int	toidx;			/* target leaf index */

		for (fromidx = toidx = be32_to_cpu(btp->count) - 1,
			highstale = lfloghigh = -1;
		     fromidx >= 0;
		     fromidx--) {
			if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
				if (highstale == -1)
					highstale = toidx;
				else {
					if (lfloghigh == -1)
						lfloghigh = toidx;
					continue;
				}
			}
			if (fromidx < toidx)
				blp[toidx] = blp[fromidx];
			toidx--;
		}
		lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
		lfloghigh -= be32_to_cpu(btp->stale) - 1;
		be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
		xfs_dir2_data_make_free(tp, bp,
			(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
			(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
			&needlog, &needscan);
		blp += be32_to_cpu(btp->stale) - 1;
		btp->stale = cpu_to_be32(1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
			needscan = 0;
		}
	}
	/*
	 * Set leaf logging boundaries to impossible state.
	 * For the no-stale case they're set explicitly.
	 */
	else if (btp->stale) {
		lfloglow = be32_to_cpu(btp->count);
		lfloghigh = -1;
	}
	/*
	 * Find the slot that's first lower than our hash value, -1 if none.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) {
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
	}
	while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) {
		mid--;
	}
	/*
	 * No stale entries, will use enddup space to hold new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Mark the space needed for the new leaf entry, now in use.
		 */
		xfs_dir2_data_use_free(tp, bp, enddup,
			(xfs_dir2_data_aoff_t)
			((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
			 sizeof(*blp)),
			(xfs_dir2_data_aoff_t)sizeof(*blp),
			&needlog, &needscan);
		/*
		 * Update the tail (entry count).
		 */
		be32_add_cpu(&btp->count, 1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
				&needlog);
			needscan = 0;
		}
		/*
		 * Adjust pointer to the first leaf entry, we're about to move
		 * the table up one to open up space for the new leaf entry.
		 * Then adjust our index to match.
		 */
		blp--;
		mid++;
		if (mid)
			memmove(blp, &blp[1], mid * sizeof(*blp));
		lfloglow = 0;
		lfloghigh = mid;
	}
	/*
	 * Use a stale leaf for our new entry.
	 */
	else {
		for (lowstale = mid;
		     lowstale >= 0 &&
			be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
		     lowstale--)
			continue;
		for (highstale = mid + 1;
		     highstale < be32_to_cpu(btp->count) &&
			be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
			(lowstale < 0 || mid - lowstale > highstale - mid);
		     highstale++)
			continue;
		/*
		 * Move entries toward the low-numbered stale entry.
		 */
		if (lowstale >= 0 &&
		    (highstale == be32_to_cpu(btp->count) ||
		     mid - lowstale <= highstale - mid)) {
			if (mid - lowstale)
				memmove(&blp[lowstale], &blp[lowstale + 1],
					(mid - lowstale) * sizeof(*blp));
			lfloglow = MIN(lowstale, lfloglow);
			lfloghigh = MAX(mid, lfloghigh);
		}
		/*
		 * Move entries toward the high-numbered stale entry.
		 */
		else {
			ASSERT(highstale < be32_to_cpu(btp->count));
			mid++;
			if (highstale - mid)
				memmove(&blp[mid + 1], &blp[mid],
					(highstale - mid) * sizeof(*blp));
			lfloglow = MIN(mid, lfloglow);
			lfloghigh = MAX(highstale, lfloghigh);
		}
		be32_add_cpu(&btp->stale, -1);
	}
	/*
	 * Point to the new data entry.
	 */
	dep = (xfs_dir2_data_entry_t *)dup;
	/*
	 * Fill in the leaf entry.
	 */
	blp[mid].hashval = cpu_to_be32(args->hashval);
	blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
	/*
	 * Mark space for the data entry used.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
	/*
	 * Create the new data entry.
	 */
	dep->inumber = cpu_to_be64(args->inumber);
	dep->namelen = args->namelen;
	memcpy(dep->name, args->name, args->namelen);
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	/*
	 * Clean up the bestfree array and log the header, tail, and entry.
	 */
	if (needscan)
		xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_log_entry(tp, bp, dep);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
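A standalone sketch of the leaf search above: binary-search to any entry with the target hash, then walk backwards so the insertion point lands before the first entry whose hash is >= the target, mirroring the low/high/mid loop in xfs_dir2_block_addname().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hashes[] = { 10, 20, 20, 20, 30, 40 };
	int count = 6;
	uint32_t target = 20;
	int low = 0, high = count - 1, mid = 0;
	uint32_t hash;

	while (low <= high) {
		mid = (low + high) >> 1;
		if ((hash = hashes[mid]) == target)
			break;
		if (hash < target)
			low = mid + 1;
		else
			high = mid - 1;
	}
	while (mid >= 0 && hashes[mid] >= target)
		mid--;

	printf("insert new entry at index %d\n", mid + 1);
	return 0;
}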
Example #9
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret, extra;
	u16 fc;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;
	struct wl12xx_arp_rsp_template *tmpl;
	struct ieee80211_hdr_3addr *hdr;
	struct arphdr *arp_hdr;

	skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
			    WL1271_EXTRA_SPACE_MAX);
	if (!skb) {
		wl1271_error("failed to allocate buffer for arp rsp template");
		return -ENOMEM;
	}

	skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);

	tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
	memset(tmpl, 0, sizeof(*tmpl));

	/* llc layer */
	memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
	tmpl->llc_type = cpu_to_be16(ETH_P_ARP);

	/* arp header */
	arp_hdr = &tmpl->arp_hdr;
	arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
	arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
	arp_hdr->ar_hln = ETH_ALEN;
	arp_hdr->ar_pln = 4;
	arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);

	/* arp payload */
	memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN);
	tmpl->sender_ip = wlvif->ip_addr;

	/* encryption space */
	switch (wlvif->encryption_type) {
	case KEY_TKIP:
		extra = WL1271_EXTRA_SPACE_TKIP;
		break;
	case KEY_AES:
		extra = WL1271_EXTRA_SPACE_AES;
		break;
	case KEY_NONE:
	case KEY_WEP:
	case KEY_GEM:
		extra = 0;
		break;
	default:
		wl1271_warning("Unknown encryption type: %d",
			       wlvif->encryption_type);
		ret = -EINVAL;
		goto out;
	}

	if (extra) {
		u8 *space = skb_push(skb, extra);
		memset(space, 0, extra);
	}

	/* QoS header - BE */
	if (wlvif->sta.qos)
		memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));

	/* mac80211 header */
	hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
	if (wlvif->sta.qos)
		fc |= IEEE80211_STYPE_QOS_DATA;
	else
		fc |= IEEE80211_STYPE_DATA;
	if (wlvif->encryption_type != KEY_NONE)
		fc |= IEEE80211_FCTL_PROTECTED;

	hdr->frame_control = cpu_to_le16(fc);
	memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
	memset(hdr->addr3, 0xff, ETH_ALEN);

	ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
				      skb->data, skb->len, 0,
				      wlvif->basic_rate);
out:
	dev_kfree_skb(skb);
	return ret;
}
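A sketch of the buffer layout logic above, using plain pointers in place of the sk_buff API: reserve headroom for everything that is pushed on later (mac80211 header, QoS field, encryption space), put the template, then push the headers in front of it. The lengths are invented.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[256];
	unsigned char *data = buf, *tail;
	size_t hdr_len = 24, extra_max = 8, tmpl_len = 64;

	/* skb_reserve(): move both pointers past the future headroom */
	data += hdr_len + extra_max;
	tail = data;

	/* skb_put(): append the ARP response template */
	memset(tail, 0, tmpl_len);
	tail += tmpl_len;

	/* skb_push(): prepend the 802.11 header into the reserved space */
	data -= hdr_len;
	memset(data, 0, hdr_len);

	printf("frame spans %zu bytes, %zu bytes of headroom left\n",
	       (size_t)(tail - data), (size_t)(data - buf));
	return 0;
}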
Example #10
	spin_unlock_bh(&nf_pptp_lock);

	return ret;
}

static const struct nf_conntrack_expect_policy pptp_exp_policy = {
	.max_expected	= 2,
	.timeout	= 5 * 60,
};

/* control protocol helper */
static struct nf_conntrack_helper pptp __read_mostly = {
	.name			= "pptp",
	.me			= THIS_MODULE,
	.tuple.src.l3num	= AF_INET,
	.tuple.src.u.tcp.port	= cpu_to_be16(PPTP_CONTROL_PORT),
	.tuple.dst.protonum	= IPPROTO_TCP,
	.help			= conntrack_pptp_help,
	.destroy		= pptp_destroy_siblings,
	.expect_policy		= &pptp_exp_policy,
};

static void nf_conntrack_pptp_net_exit(struct net *net)
{
	nf_ct_gre_keymap_flush(net);
}

static struct pernet_operations nf_conntrack_pptp_net_ops = {
	.exit = nf_conntrack_pptp_net_exit,
};
Example #11
/*
 * Convert the shortform directory to block form.
 */
int						/* error */
xfs_dir2_sf_to_block(
	xfs_da_args_t		*args)		/* operation arguments */
{
	xfs_dir2_db_t		blkno;		/* dir-relative block # (0) */
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
	char			*buf;		/* sf buffer */
	int			buf_len;
	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
	xfs_inode_t		*dp;		/* incore directory inode */
	int			dummy;		/* trash */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	int			endoffset;	/* end of data objects */
	int			error;		/* error return value */
	int			i;		/* index */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to scan block freespc */
	int			newoffset;	/* offset from current entry */
	int			offset;		/* target block offset */
	xfs_dir2_sf_entry_t	*sfep;		/* sf entry pointer */
	xfs_dir2_sf_t		*sfp;		/* shortform structure */
	__be16			*tagp;		/* end of data entry */
	xfs_trans_t		*tp;		/* transaction pointer */
	struct xfs_name		name;

	xfs_dir2_trace_args("sf_to_block", args);
	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
	/*
	 * Bomb out if the shortform directory is way too short.
	 */
	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return XFS_ERROR(EIO);
	}
	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
	ASSERT(dp->i_df.if_u1.if_data != NULL);
	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
	ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
	/*
	 * Copy the directory into the stack buffer.
	 * Then pitch the incore inode data so we can make extents.
	 */

	buf_len = dp->i_df.if_bytes;
	buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);

	memcpy(buf, sfp, dp->i_df.if_bytes);
	xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
	dp->i_d.di_size = 0;
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	/*
	 * Reset pointer - old sfp is gone.
	 */
	sfp = (xfs_dir2_sf_t *)buf;
	/*
	 * Add block 0 to the inode.
	 */
	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
	if (error) {
		kmem_free(buf);
		return error;
	}
	/*
	 * Initialize the data block.
	 */
	error = xfs_dir2_data_init(args, blkno, &bp);
	if (error) {
		kmem_free(buf);
		return error;
	}
	block = bp->data;
	block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
	/*
	 * Compute size of block "tail" area.
	 */
	i = (uint)sizeof(*btp) +
	    (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
	/*
	 * The whole thing is initialized to free by the init routine.
	 * Say we're using the leaf and tail area.
	 */
	dup = (xfs_dir2_data_unused_t *)block->u;
	needlog = needscan = 0;
	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
		&needscan);
	ASSERT(needscan == 0);
	/*
	 * Fill in the tail.
	 */
	btp = xfs_dir2_block_tail_p(mp, block);
	btp->count = cpu_to_be32(sfp->hdr.count + 2);	/* ., .. */
	btp->stale = 0;
	blp = xfs_dir2_block_leaf_p(btp);
	endoffset = (uint)((char *)blp - (char *)block);
	/*
	 * Remove the freespace, we'll manage it.
	 */
	xfs_dir2_data_use_free(tp, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
		be16_to_cpu(dup->length), &needlog, &needscan);
	/*
	 * Create entry for .
	 */
	dep = (xfs_dir2_data_entry_t *)
	      ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
	dep->inumber = cpu_to_be64(dp->i_ino);
	dep->namelen = 1;
	dep->name[0] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
	blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	/*
	 * Create entry for ..
	 */
	dep = (xfs_dir2_data_entry_t *)
		((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
	dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
	dep->namelen = 2;
	dep->name[0] = dep->name[1] = '.';
	tagp = xfs_dir2_data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)block);
	xfs_dir2_data_log_entry(tp, bp, dep);
	blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
	blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
				(char *)dep - (char *)block));
	offset = XFS_DIR2_DATA_FIRST_OFFSET;
	/*
	 * Loop over existing entries, stuff them in.
	 */
	if ((i = 0) == sfp->hdr.count)
		sfep = NULL;
	else
		sfep = xfs_dir2_sf_firstentry(sfp);
	/*
	 * Need to preserve the existing offset values in the sf directory.
	 * Insert holes (unused entries) where necessary.
	 */
	while (offset < endoffset) {
		/*
		 * sfep is null when we reach the end of the list.
		 */
		if (sfep == NULL)
			newoffset = endoffset;
		else
			newoffset = xfs_dir2_sf_get_offset(sfep);
		/*
		 * There should be a hole here, make one.
		 */
		if (offset < newoffset) {
			dup = (xfs_dir2_data_unused_t *)
			      ((char *)block + offset);
			dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			dup->length = cpu_to_be16(newoffset - offset);
			*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
				((char *)dup - (char *)block));
			xfs_dir2_data_log_unused(tp, bp, dup);
			(void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
				dup, &dummy);
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/*
		 * Copy a real entry.
		 */
		dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
		dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
				xfs_dir2_sf_inumberp(sfep)));
		dep->namelen = sfep->namelen;
		memcpy(dep->name, sfep->name, dep->namelen);
		tagp = xfs_dir2_data_entry_tag_p(dep);
		*tagp = cpu_to_be16((char *)dep - (char *)block);
		xfs_dir2_data_log_entry(tp, bp, dep);
		name.name = sfep->name;
		name.len = sfep->namelen;
		blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
							hashname(&name));
		blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
						 (char *)dep - (char *)block));
		offset = (int)((char *)(tagp + 1) - (char *)block);
		if (++i == sfp->hdr.count)
			sfep = NULL;
		else
			sfep = xfs_dir2_sf_nextentry(sfp, sfep);
	}
	/* Done with the temporary buffer */
	kmem_free(buf);
	/*
	 * Sort the leaf entries by hash value.
	 */
	xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort);
	/*
	 * Log the leaf entry area and tail.
	 * Already logged the header in data_init, ignore needlog.
	 */
	ASSERT(needscan == 0);
	xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_check(dp, bp);
	xfs_da_buf_done(bp);
	return 0;
}
Example #12
static inline void flash_write16(u16 d, void __iomem *addr)
{
	__raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
}
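A standalone sketch of the address swizzle above: XOR-ing bit 1 of the address swaps the two 16-bit halves of each aligned 32-bit word, which is how a byte-swapped bus is made to line up with the flash chip.

#include <stdio.h>

int main(void)
{
	unsigned long addrs[] = { 0x0, 0x2, 0x4, 0x6 };
	int i;

	for (i = 0; i < 4; i++)
		printf("write to 0x%lx goes to 0x%lx\n",
		       addrs[i], addrs[i] ^ 0x2);
	return 0;
}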
Example #13
static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
{
	struct redrat3_dev *rr3 = rcdev->priv;
	struct device *dev = rr3->dev;
	struct redrat3_signal_header header;
	int i, j, count, ret, ret_len, offset;
	int lencheck, cur_sample_len, pipe;
	char *buffer = NULL, *sigdata = NULL;
	int *sample_lens = NULL;
	u32 tmpi;
	u16 tmps;
	u8 *datap;
	u8 curlencheck = 0;
	u16 *lengths_ptr;
	int sendbuf_len;

	rr3_ftr(dev, "Entering %s\n", __func__);

	if (rr3->transmitting) {
		dev_warn(dev, "%s: transmitter already in use\n", __func__);
		return -EAGAIN;
	}

	count = n / sizeof(int);
	if (count > (RR3_DRIVER_MAXLENS * 2))
		return -EINVAL;

	rr3->transmitting = true;

	redrat3_disable_detector(rr3);

	if (rr3->det_enabled) {
		dev_err(dev, "%s: cannot tx while rx is enabled\n", __func__);
		ret = -EIO;
		goto out;
	}

	sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
	if (!sample_lens) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < count; i++) {
		for (lencheck = 0; lencheck < curlencheck; lencheck++) {
			cur_sample_len = redrat3_us_to_len(txbuf[i]);
			if (sample_lens[lencheck] == cur_sample_len)
				break;
		}
		if (lencheck == curlencheck) {
			cur_sample_len = redrat3_us_to_len(txbuf[i]);
			rr3_dbg(dev, "txbuf[%d]=%u, pos %d, enc %u\n",
				i, txbuf[i], curlencheck, cur_sample_len);
			if (curlencheck < 255) {
				/* now convert the value to a proper
				 * rr3 value.. */
				sample_lens[curlencheck] = cur_sample_len;
				curlencheck++;
			} else {
				dev_err(dev, "signal too long\n");
				ret = -EINVAL;
				goto out;
			}
		}
	}

	sigdata = kzalloc((count + RR3_TX_TRAILER_LEN), GFP_KERNEL);
	if (!sigdata) {
		ret = -ENOMEM;
		goto out;
	}

	sigdata[count] = RR3_END_OF_SIGNAL;
	sigdata[count + 1] = RR3_END_OF_SIGNAL;
	for (i = 0; i < count; i++) {
		for (j = 0; j < curlencheck; j++) {
			if (sample_lens[j] == redrat3_us_to_len(txbuf[i]))
				sigdata[i] = j;
		}
	}

	offset = RR3_TX_HEADER_OFFSET;
	sendbuf_len = RR3_HEADER_LENGTH + (sizeof(u16) * RR3_DRIVER_MAXLENS)
			+ count + RR3_TX_TRAILER_LEN + offset;

	buffer = kzalloc(sendbuf_len, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	/* fill in our packet header */
	header.length = sendbuf_len - offset;
	header.transfer_type = RR3_MOD_SIGNAL_OUT;
	header.pause = redrat3_len_to_us(100);
	header.mod_freq_count = mod_freq_to_val(rr3->carrier);
	header.no_periods = 0; /* n/a to transmit */
	header.max_lengths = RR3_DRIVER_MAXLENS;
	header.no_lengths = curlencheck;
	header.max_sig_size = RR3_MAX_SIG_SIZE;
	header.sig_size = count + RR3_TX_TRAILER_LEN;
	/* we currently rely on repeat handling in the IR encoding source */
	header.no_repeats = 0;

	tmps = cpu_to_be16(header.length);
	memcpy(buffer, &tmps, 2);

	tmps = cpu_to_be16(header.transfer_type);
	memcpy(buffer + 2, &tmps, 2);

	tmpi = cpu_to_be32(header.pause);
	memcpy(buffer + offset, &tmpi, sizeof(tmpi));

	tmps = cpu_to_be16(header.mod_freq_count);
	memcpy(buffer + offset + RR3_FREQ_COUNT_OFFSET, &tmps, 2);

	buffer[offset + RR3_NUM_LENGTHS_OFFSET] = header.no_lengths;

	tmps = cpu_to_be16(header.sig_size);
	memcpy(buffer + offset + RR3_NUM_SIGS_OFFSET, &tmps, 2);

	buffer[offset + RR3_REPEATS_OFFSET] = header.no_repeats;

	lengths_ptr = (u16 *)(buffer + offset + RR3_HEADER_LENGTH);
	for (i = 0; i < curlencheck; ++i)
		lengths_ptr[i] = cpu_to_be16(sample_lens[i]);

	datap = (u8 *)(buffer + offset + RR3_HEADER_LENGTH +
			    (sizeof(u16) * RR3_DRIVER_MAXLENS));
	memcpy(datap, sigdata, (count + RR3_TX_TRAILER_LEN));

	if (debug) {
		redrat3_dump_signal_header(&header);
		redrat3_dump_signal_data(buffer, header.sig_size);
	}

	pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress);
	ret = usb_bulk_msg(rr3->udev, pipe, buffer,
			   sendbuf_len, &ret_len, 10 * HZ);
	rr3_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, ret);

	/* now tell the hardware to transmit what we sent it */
	pipe = usb_rcvctrlpipe(rr3->udev, 0);
	ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			      0, 0, buffer, 2, HZ * 10);

	if (ret < 0)
		dev_err(dev, "Error: control msg send failed, rc %d\n", ret);
	else
		ret = n;

out:
	kfree(sample_lens);
	kfree(buffer);
	kfree(sigdata);

	rr3->transmitting = false;

	redrat3_enable_detector(rr3);

	return ret;
}
Example #14
int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	mutex_lock(&q->qi_quotaofflock);

	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);
	xfs_qm_dqrele(dqp);

 out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
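A standalone sketch of the limit-update pattern above (the mask names and bit values are invented stand-ins for FS_DQ_BSOFT/FS_DQ_BHARD): take the new value only when its bit is set in the caller's fieldmask, otherwise keep the on-disk value, and accept the pair when hard == 0 (meaning "no limit") or hard >= soft.

#include <stdio.h>
#include <stdint.h>

#define QLIM_BSOFT	(1 << 0)	/* hypothetical mask bits */
#define QLIM_BHARD	(1 << 1)

int main(void)
{
	unsigned int fieldmask = QLIM_BHARD;	/* caller only sets the hard limit */
	uint64_t new_hard = 1000, new_soft = 2000;
	uint64_t disk_hard = 0, disk_soft = 500;

	uint64_t hard = (fieldmask & QLIM_BHARD) ? new_hard : disk_hard;
	uint64_t soft = (fieldmask & QLIM_BSOFT) ? new_soft : disk_soft;

	if (hard == 0 || hard >= soft)
		printf("accept: hard=%llu soft=%llu\n",
		       (unsigned long long)hard, (unsigned long long)soft);
	else
		printf("reject: hard %llu < soft %llu\n",
		       (unsigned long long)hard, (unsigned long long)soft);
	return 0;
}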
Example #15
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}
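A standalone sketch of the 7.9 register format above: a 7-bit register address in the top bits and a 9-bit value in the low bits of a single 16-bit word (byte order aside).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int reg = 0x35;	/* 7-bit register */
	unsigned int val = 0x1ab;	/* 9-bit value */
	uint16_t word = (uint16_t)((reg << 9) | val);

	printf("wire word: 0x%04x (reg=0x%02x val=0x%03x)\n",
	       word, (word >> 9) & 0x7f, word & 0x1ff);
	return 0;
}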
Example #16
static void set_imx_hdr_v2(struct imx_header *imxhdr, uint32_t dcd_len,
		uint32_t entry_point, uint32_t flash_offset)
{
	imx_header_v2_t *hdr_v2 = &imxhdr->header.hdr_v2;
	flash_header_v2_t *fhdr_v2 = &hdr_v2->fhdr;
	uint32_t hdr_base;

	/* Set magic number */
	fhdr_v2->header.tag = IVT_HEADER_TAG; /* 0xD1 */
	fhdr_v2->header.length = cpu_to_be16(sizeof(flash_header_v2_t));
	fhdr_v2->header.version = IVT_VERSION; /* 0x40 */

	if (!hdr_v2->boot_data.plugin) {
		fhdr_v2->entry = entry_point;
		fhdr_v2->reserved1 = fhdr_v2->reserved2 = 0;
		hdr_base = entry_point - imximage_init_loadsize +
			flash_offset;
		fhdr_v2->self = hdr_base;
		fhdr_v2->dcd_ptr = hdr_base + offsetof(imx_header_v2_t, data);
		fhdr_v2->boot_data_ptr = hdr_base
				+ offsetof(imx_header_v2_t, boot_data);
		hdr_v2->boot_data.start = entry_point - imximage_init_loadsize;

		fhdr_v2->csf = 0;

		header_size_ptr = &hdr_v2->boot_data.size;
		csf_ptr = &fhdr_v2->csf;
	} else {
		imx_header_v2_t *next_hdr_v2;
		flash_header_v2_t *next_fhdr_v2;

		if (imximage_csf_size != 0) {
			fprintf(stderr, "Error: Header v2: SECURE_BOOT "
					"is only supported in DCD mode!");
			exit(EXIT_FAILURE);
		}

		fhdr_v2->entry = imximage_iram_free_start +
			flash_offset + sizeof(flash_header_v2_t) +
			sizeof(boot_data_t);

		fhdr_v2->reserved1 = fhdr_v2->reserved2 = 0;
		fhdr_v2->self = imximage_iram_free_start + flash_offset;

		fhdr_v2->dcd_ptr = 0;

		fhdr_v2->boot_data_ptr = fhdr_v2->self +
				offsetof(imx_header_v2_t, boot_data);

		hdr_v2->boot_data.start = imximage_iram_free_start;
		/*
		 * The actual size of the plugin image is "imximage_plugin_size +
		 * sizeof(flash_header_v2_t) + sizeof(boot_data_t)", plus the
		 * flash_offset space. The ROM code only needs to copy this much
		 * to run the plugin code. However, when it later copies the
		 * whole U-Boot image to DDR, the ROM code uses memcpy for the
		 * first part of the image and the storage read function for
		 * the remaining part. This requires the dividing point to be
		 * a multiple of the storage sector size, so we set the first
		 * section to 16KB for this purpose.
		 */
		hdr_v2->boot_data.size = MAX_PLUGIN_CODE_SIZE;

		/* Security feature are not supported */
		fhdr_v2->csf = 0;

		next_hdr_v2 = (imx_header_v2_t *)((char*)hdr_v2 +
				imximage_plugin_size);

		next_fhdr_v2 = &next_hdr_v2->fhdr;

		next_fhdr_v2->header.tag = IVT_HEADER_TAG; /* 0xD1 */
		next_fhdr_v2->header.length =
			cpu_to_be16(sizeof(flash_header_v2_t));
		next_fhdr_v2->header.version = IVT_VERSION; /* 0x40 */

		next_fhdr_v2->entry = entry_point;
		hdr_base = entry_point - sizeof(struct imx_header);
		next_fhdr_v2->reserved1 = next_fhdr_v2->reserved2 = 0;
		next_fhdr_v2->self = hdr_base + imximage_plugin_size;

		next_fhdr_v2->dcd_ptr = 0;
		next_fhdr_v2->boot_data_ptr = next_fhdr_v2->self +
				offsetof(imx_header_v2_t, boot_data);

		next_hdr_v2->boot_data.start = hdr_base - flash_offset;

		header_size_ptr = &next_hdr_v2->boot_data.size;

		next_hdr_v2->boot_data.plugin = 0;

		next_fhdr_v2->csf = 0;
	}
}
Example #17
static void regmap_format_16(void *buf, unsigned int val)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val);
}
Example #18
int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct ib_ah_attr *ah_attr,
		    struct mthca_ah *ah)
{
	u32 index = -1;
	struct mthca_av *av = NULL;

	ah->type = MTHCA_AH_PCI_POOL;

	if (mthca_is_memfree(dev)) {
		ah->av   = kmalloc(sizeof *ah->av, GFP_ATOMIC);
		if (!ah->av)
			return -ENOMEM;

		ah->type = MTHCA_AH_KMALLOC;
		av       = ah->av;
	} else if (!atomic_read(&pd->sqp_count) &&
		 !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		index = mthca_alloc(&dev->av_table.alloc);

		/* fall back to allocate in host memory */
		if (index == -1)
			goto on_hca_fail;

		av = kmalloc(sizeof *av, GFP_ATOMIC);
		if (!av)
			goto on_hca_fail;

		ah->type = MTHCA_AH_ON_HCA;
		ah->avdma  = dev->av_table.ddr_av_base +
			index * MTHCA_AV_SIZE;
	}

on_hca_fail:
	if (ah->type == MTHCA_AH_PCI_POOL) {
		ah->av = pci_pool_alloc(dev->av_table.pool,
					SLAB_ATOMIC, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

		av = ah->av;
	}

	ah->key = pd->ntmr.ibmr.lkey;

	memset(av, 0, MTHCA_AV_SIZE);

	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
	av->g_slid  = ah_attr->src_path_bits;
	av->dlid    = cpu_to_be16(ah_attr->dlid);
	av->msg_sr  = (3 << 4) | /* 2K message */
		ah_attr->static_rate;
	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		av->g_slid |= 0x80;
		av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
			ah_attr->grh.sgid_index;
		av->hop_limit = ah_attr->grh.hop_limit;
		av->sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
	} else {
		/* Arbel workaround -- low byte of GID must be 2 */
		av->dgid[3] = cpu_to_be32(2);
	}

	if (0) {
		int j;

		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
			  av, (unsigned long) ah->avdma);
		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG "  [%2x] %08x\n",
			       j * 4, be32_to_cpu(((u32 *) av)[j]));
	}

	if (ah->type == MTHCA_AH_ON_HCA) {
		memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
			    av, MTHCA_AV_SIZE);
		kfree(av);
	}

	return 0;
}
Example #19
					skb->len) < 0)
			goto out_dev;
		dev_queue_xmit(skb);
		dev_put(out_dev);
		return NET_RX_SUCCESS;
out_dev:
		dev_put(out_dev);
	}

out:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type phonet_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_PHONET),
	.func = phonet_rcv,
};

static DEFINE_MUTEX(proto_tab_lock);

int __init_or_module phonet_proto_register(int protocol,
						struct phonet_protocol *pp)
{
	int err = 0;

	if (protocol >= PHONET_NPROTO)
		return -EINVAL;

	err = proto_register(pp->prot, 1);
	if (err)
Example #20
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
Example #21
static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_target_vif hvif;
	int ret = 0;
	u8 cmd_rsp;

	mutex_lock(&priv->mutex);

	/* Only one interface for now */
	if (priv->nvifs > 0) {
		ret = -ENOBUFS;
		goto out;
	}

	ath9k_htc_ps_wakeup(priv);
	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
	memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		hvif.opmode = cpu_to_be32(HTC_M_STA);
		break;
	case NL80211_IFTYPE_ADHOC:
		hvif.opmode = cpu_to_be32(HTC_M_IBSS);
		break;
	default:
		ath_err(common,
			"Interface type %d not yet supported\n", vif->type);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ath_dbg(common, ATH_DBG_CONFIG,
		"Attach a VIF of type: %d\n", vif->type);

	priv->ah->opmode = vif->type;

	/* Index starts from zero on the target */
	avp->index = hvif.index = priv->nvifs;
	hvif.rtsthreshold = cpu_to_be16(2304);
	WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
	if (ret)
		goto out;

	priv->nvifs++;

	/*
	 * We need a node in target to tx mgmt frames
	 * before association.
	 */
	ret = ath9k_htc_add_station(priv, vif, NULL);
	if (ret)
		goto out;

	ret = ath9k_htc_update_cap_target(priv);
	if (ret)
		ath_dbg(common, ATH_DBG_CONFIG,
			"Failed to update capability in target\n");

	priv->vif = vif;
out:
	ath9k_htc_ps_restore(priv);
	mutex_unlock(&priv->mutex);

	return ret;
}
Example #22
static int __devinit ad9834_probe(struct spi_device *spi)
{
	struct ad9834_platform_data *pdata = spi->dev.platform_data;
	struct ad9834_state *st;
	struct iio_dev *indio_dev;
	struct regulator *reg;
	int ret;

	if (!pdata) {
		dev_dbg(&spi->dev, "no platform data?\n");
		return -ENODEV;
	}

	reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(reg)) {
		ret = regulator_enable(reg);
		if (ret)
			goto error_put_reg;
	}

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_disable_reg;
	}
	spi_set_drvdata(spi, indio_dev);
	st = iio_priv(indio_dev);
	st->mclk = pdata->mclk;
	st->spi = spi;
	st->devid = spi_get_device_id(spi)->driver_data;
	st->reg = reg;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad9834_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	/* Setup default messages */

	st->xfer.tx_buf = &st->data;
	st->xfer.len = 2;

	spi_message_init(&st->msg);
	spi_message_add_tail(&st->xfer, &st->msg);

	st->freq_xfer[0].tx_buf = &st->freq_data[0];
	st->freq_xfer[0].len = 2;
	st->freq_xfer[0].cs_change = 1;
	st->freq_xfer[1].tx_buf = &st->freq_data[1];
	st->freq_xfer[1].len = 2;

	spi_message_init(&st->freq_msg);
	spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
	spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);

	st->control = AD9834_B28 | AD9834_RESET;

	if (!pdata->en_div2)
		st->control |= AD9834_DIV2;

	if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
		st->control |= AD9834_SIGN_PIB;

	st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
	ret = spi_sync(st->spi, &st->msg);
	if (ret) {
		dev_err(&spi->dev, "device init failed\n");
		goto error_free_device;
	}

	ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
	if (ret)
		goto error_free_device;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_device;

	return 0;

error_free_device:
	iio_free_device(indio_dev);
error_disable_reg:
	if (!IS_ERR(reg))
		regulator_disable(reg);
error_put_reg:
	if (!IS_ERR(reg))
		regulator_put(reg);
	return ret;
}
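The probe path above loads the FREQ0/FREQ1 registers through ad9834_write_frequency(), which is not shown here. A reconstructed sketch of its likely shape (ad9834_calc_freqreg(), RES_MASK() and AD9834_FREQ_BITS are assumed from the driver's headers and may differ in detail): the 28-bit tuning word is split into two 14-bit halves, each tagged with the register address and byte-swapped to the device's big-endian framing, then the two-transfer SPI message prepared in probe is synced.

static int ad9834_write_frequency(struct ad9834_state *st,
				  unsigned long addr, unsigned long fout)
{
	unsigned long regval;

	if (fout > (st->mclk / 2))
		return -EINVAL;

	/* 28-bit tuning word: fout * 2^28 / mclk (helper assumed). */
	regval = ad9834_calc_freqreg(st->mclk, fout);

	/* Low 14 bits, then high 14 bits, each prefixed with the register
	 * address and converted to the device's big-endian byte order. */
	st->freq_data[0] = cpu_to_be16(addr | (regval &
				       RES_MASK(AD9834_FREQ_BITS / 2)));
	st->freq_data[1] = cpu_to_be16(addr | ((regval >>
				       (AD9834_FREQ_BITS / 2)) &
				       RES_MASK(AD9834_FREQ_BITS / 2)));

	return spi_sync(st->spi, &st->freq_msg);
}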
Example #23
static int ath9k_htc_start(struct ieee80211_hw *hw)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int ret = 0;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;

	mutex_lock(&priv->mutex);

	ath_dbg(common, ATH_DBG_CONFIG,
		"Starting driver with initial channel: %d MHz\n",
		curchan->center_freq);

	/* Ensure that HW is awake before flushing RX */
	ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
	WMI_CMD(WMI_FLUSH_RECV_CMDID);

	/* set up initial channel */
	init_channel = ath9k_cmn_get_curchannel(hw, ah);

	ath9k_hw_htc_resetinit(ah);
	ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
	if (ret) {
		ath_err(common,
			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
			ret, curchan->center_freq);
		mutex_unlock(&priv->mutex);
		return ret;
	}

	ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
			       &priv->curtxpow);

	mode = ath9k_htc_get_curmode(priv, init_channel);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
	WMI_CMD(WMI_ATH_INIT_CMDID);
	WMI_CMD(WMI_START_RECV_CMDID);

	ath9k_host_rx_init(priv);

	priv->op_flags &= ~OP_INVALID;
	htc_start(priv->htc);

	spin_lock_bh(&priv->tx_lock);
	priv->tx_queues_stop = false;
	spin_unlock_bh(&priv->tx_lock);

	ieee80211_wake_queues(hw);

	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
		ath9k_hw_btcoex_enable(ah);
		ath_htc_resume_btcoex_work(priv);
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
Example #24
static ssize_t ad9834_write(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct ad9834_state *st = iio_priv(dev_info);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		goto error_ret;

	mutex_lock(&dev_info->mlock);
	switch (this_attr->address) {
	case AD9834_REG_FREQ0:
	case AD9834_REG_FREQ1:
		ret = ad9834_write_frequency(st, this_attr->address, val);
		break;
	case AD9834_REG_PHASE0:
	case AD9834_REG_PHASE1:
		ret = ad9834_write_phase(st, this_attr->address, val);
		break;
	case AD9834_OPBITEN:
		if (st->control & AD9834_MODE) {
			ret = -EINVAL;  /* AD9834 reserved mode */
			break;
		}

		if (val)
			st->control |= AD9834_OPBITEN;
		else
			st->control &= ~AD9834_OPBITEN;

		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_PIN_SW:
		if (val)
			st->control |= AD9834_PIN_SW;
		else
			st->control &= ~AD9834_PIN_SW;
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_FSEL:
	case AD9834_PSEL:
		if (val == 0)
			st->control &= ~(this_attr->address | AD9834_PIN_SW);
		else if (val == 1) {
			st->control |= this_attr->address;
			st->control &= ~AD9834_PIN_SW;
		} else {
			ret = -EINVAL;
			break;
		}
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_RESET:
		if (val)
			st->control &= ~AD9834_RESET;
		else
			st->control |= AD9834_RESET;

		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	default:
		ret = -ENODEV;
	}
	mutex_unlock(&dev_info->mlock);

error_ret:
	return ret ? ret : len;
}
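Every arm of the switch above ends with the same two lines: rebuild the big-endian command word from the cached control bits and sync the single-transfer message. A hypothetical helper (not part of the driver) that would collapse that repetition:

static int ad9834_sync_control(struct ad9834_state *st)
{
	/* Serialize the cached control bits and push them over SPI. */
	st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
	return spi_sync(st->spi, &st->msg);
}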
Example #25
static int msm_sensor_read_otp(struct msm_sensor_ctrl_t *s_ctrl)
{
	uint16_t status_reg;

	int16_t page_id = 0;
	int16_t poll_times = 0;

	int32_t rc = 0;

	struct msm_camera_sensor_slave_info *camera_info;
	struct otp_info_t *otp;

	camera_info = s_ctrl->sensordata->cam_slave_info;
	if (!camera_info) {
		pr_warn("%s: camera slave info is not defined\n", __func__);
		return -EAGAIN;
	}

	otp = &camera_info->sensor_init_params.sensor_otp;
	if (otp->otp_info) {
		pr_devel("%s: %s OTP already read\n", __func__,
			s_ctrl->sensordata->sensor_name);
		return 0;

	} else if (!otp->enable) {
		pr_warn("%s: %s OTP disabled in sensor lib\n", __func__,
			s_ctrl->sensordata->sensor_name);
		return 0;
	}

	pr_devel("%s sensor OTP initialization block:\n"
			" - page size: %d\n"
			" - pages count: %d\n"
			" - page register address: 0x%x\n"
			" - first page base address: 0x%x\n"
			" - control register address: 0x%x\n"
			" - read mode setting: 0x%x\n"
			" - status register address: 0x%x\n"
			" - read complete bit: 0x%x\n"
			" - reset register address: 0x%x\n"
			" - stream on: 0x%x\n"
			" - stream off: 0x%x\n"
			" - data segment address: 0x%x\n"
			" - data size: %d\n"
			" - %s endian\n"
			" - poll times: %d\n"
			" - poll sleep delay: %d\n",

			s_ctrl->sensordata->sensor_name,

			otp->page_size,
			otp->num_of_pages,
			otp->page_reg_addr,
			otp->page_reg_base_addr,
			otp->ctrl_reg_addr,
			otp->ctrl_reg_read_mode,
			otp->status_reg_addr,
			otp->status_reg_read_complete_bit,
			otp->reset_reg_addr,
			otp->reset_reg_stream_on,
			otp->reset_reg_stream_off,
			otp->data_seg_addr,
			otp->data_size,
			otp->big_endian ? "big" : "little",
			otp->poll_times,
			otp->poll_usleep
			);

	/* Allocate OTP memory */
	otp->otp_info = kzalloc(otp->page_size*otp->num_of_pages, GFP_KERNEL);
	if (otp->otp_info == NULL) {
		pr_err("%s: Unable to allocate memory for OTP!\n", __func__);
		return -ENOMEM;
	}


	/* Enable Streaming */
	if (otp->reset_reg_addr) {
		rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write(
			s_ctrl->sensor_i2c_client,
			otp->reset_reg_addr,
			otp->reset_reg_stream_on,
			otp->data_size);
		if (rc < 0) {
			pr_err("%s: Unable to stream on the sensor!\n",
					__func__);
			goto exit;
		}
	}


	for (page_id = 0; page_id < otp->num_of_pages; ++page_id) {

		uint16_t page_addr = otp->page_reg_base_addr + page_id;
		if (otp->data_size == MSM_CAMERA_I2C_WORD_DATA &&
				otp->big_endian) {
			page_addr = cpu_to_be16(page_addr);
		}

		/* Write OTP page address */
		rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write(
			s_ctrl->sensor_i2c_client,
			otp->page_reg_addr, page_addr,
			otp->data_size);
		if (rc < 0) {
			pr_err("%s: Unable to write OTP page address!\n",
					__func__);
			goto disable_streaming;
		}

		/* Set OTP read mode */
		rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write(
			s_ctrl->sensor_i2c_client,
			otp->ctrl_reg_addr,
			otp->ctrl_reg_read_mode,
			otp->data_size);
		if (rc < 0) {
			pr_err("%s: Unable to set OTP read mode!\n", __func__);
			goto disable_streaming;
		}

		/* Poll status register until the read-complete flag is set;
		 * reset the counter and stale status for each page. */
		poll_times = 0;
		status_reg = 0;
		while (poll_times < otp->poll_times) {
			rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_read(
				s_ctrl->sensor_i2c_client,
				otp->status_reg_addr, &status_reg,
				otp->data_size);
			if (rc < 0) {
				pr_err("%s: Unable to read OTP status!\n",
						__func__);
				goto disable_streaming;
			}

			if (status_reg & otp->status_reg_read_complete_bit)
				break;

			usleep(otp->poll_usleep);
			poll_times++;
		}


		/* Read OTP data */
		if (!(status_reg & otp->status_reg_read_complete_bit)) {
			pr_devel("%s: OTP read of page 0x%x failed\n",
				__func__, otp->page_reg_base_addr + page_id);
			continue;
		}

		rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
			i2c_read_seq(s_ctrl->sensor_i2c_client,
			otp->data_seg_addr,
			otp->otp_info + page_id*otp->page_size,
			otp->page_size);
		if (rc < 0) {
			pr_err("%s: Unable to read OTP page 0x%x over i2c!\n",
				__func__, otp->page_reg_base_addr + page_id);
			goto disable_streaming;
		}

		pr_devel("%s: OTP read of page 0x%x successful\n",
			__func__, otp->page_reg_base_addr + page_id);
	}

disable_streaming:
	/* Disable streaming */
	if (otp->reset_reg_addr) {
		s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write(
				s_ctrl->sensor_i2c_client,
				otp->reset_reg_addr,
				otp->reset_reg_stream_off,
				otp->data_size);
	}

exit:
	return rc;
}
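The page-address fixup near the top of the paging loop only byte-swaps when the transfer is word-sized and the sensor is big-endian; byte-sized writes never need it. A refactoring sketch of just that decision (same fields as above, pulled out of the loop for clarity):

static uint16_t otp_fixup_page_addr(const struct otp_info_t *otp,
				    uint16_t page_addr)
{
	/* Only word transfers to a big-endian sensor need the swap; on a
	 * little-endian host cpu_to_be16() reverses the two bytes. */
	if (otp->data_size == MSM_CAMERA_I2C_WORD_DATA && otp->big_endian)
		return cpu_to_be16(page_addr);
	return page_addr;
}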
Example #26
/*
 * rebuilds an inode tree given a cursor.  We're lazy here and call
 * the routine that builds the agi
 */
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic,
		struct agi_stat *agi_stat, int finobt)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	xfs_agino_t		first_agino;
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_rec_t		*bt_rec;
	ino_tree_node_t		*ino_rec;
	bt_stat_level_t		*lptr;
	xfs_agino_t		count = 0;
	xfs_agino_t		freecount = 0;
	int			inocnt;
	uint8_t			finocnt;
	int			k;
	int			level = btree_curs->num_levels;
	int			spmask;
	uint64_t		sparse;
	uint16_t		holemask;

	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_ino_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (finobt)
		ino_rec = findfirst_free_inode_rec(agno);
	else
		ino_rec = findfirst_inode_rec(agno);

	if (ino_rec != NULL)
		first_agino = ino_rec->ino_startnum;
	else
		first_agino = NULLAGINO;

	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0)
			prop_ino_cursor(mp, agno, btree_curs,
					ino_rec->ino_startnum, 0);

		bt_rec = (xfs_inobt_rec_t *)
			  ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ino_rec != NULL);
			bt_rec[j].ir_startino =
					cpu_to_be32(ino_rec->ino_startnum);
			bt_rec[j].ir_free = cpu_to_be64(ino_rec->ir_free);

			inocnt = finocnt = 0;
			for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++)  {
				ASSERT(is_inode_confirmed(ino_rec, k));

				if (is_inode_sparse(ino_rec, k))
					continue;
				if (is_inode_free(ino_rec, k))
					finocnt++;
				inocnt++;
			}

			/*
			 * Set the freecount and check whether we need to update
			 * the sparse format fields. Otherwise, skip to the next
			 * record.
			 */
			inorec_set_freecount(mp, &bt_rec[j], finocnt);
			if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
				goto nextrec;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.
			 */
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			bt_rec[j].ir_u.sp.ir_count = inocnt;
			bt_rec[j].ir_u.sp.ir_holemask = cpu_to_be16(holemask);

nextrec:
			freecount += finocnt;
			count += inocnt;

			if (finobt)
				ino_rec = next_free_ino_rec(ino_rec);
			else
				ino_rec = next_ino_rec(ino_rec);
		}

		if (ino_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_INO_TRACE
				fprintf(stderr, "writing inobt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	if (agi_stat) {
		agi_stat->first_agino = first_agino;
		agi_stat->count = count;
		agi_stat->freecount = freecount;
	}
}
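The sparse-to-holemask loop above compresses the 64-bit in-core per-inode mask into 16 on-disk bits, one holemask bit per XFS_INODES_PER_HOLEMASK_BIT (64 inodes / 16 bits = 4) inodes; each 4-inode group must be entirely sparse or entirely allocated, which the ASSERTs enforce. A standalone version of the same conversion, for illustration:

#include <stdint.h>

/* Collapse a 64-bit sparse-inode mask into the 16-bit on-disk holemask;
 * assumes 4 inodes per holemask bit and fully sparse/allocated groups. */
static uint16_t sparse_to_holemask(uint64_t sparse)
{
	uint16_t holemask = 0;
	uint64_t spmask = (1 << 4) - 1;
	int k;

	for (k = 0; k < 16; k++) {
		if (sparse & spmask)
			holemask |= (1 << k);
		sparse >>= 4;
	}
	return holemask;	/* e.g. 0x00000000000000f0 -> 0x0002 */
}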
Example #27
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad,
		      struct ib_mad *out_mad)
{
	int err;
	u8 status;
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
	    slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad,
			    &status);
	if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
		return IB_MAD_RESULT_FAILURE;
	}
	if (status == MTHCA_CMD_STAT_BAD_PKT)
		return IB_MAD_RESULT_SUCCESS;
	if (status) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
			  status);
		return IB_MAD_RESULT_FAILURE;
	}

	if (!out_mad->mad_hdr.status)
		smp_snoop(ibdev, port_num, in_mad);

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
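The `cpu_to_be16(1 << 15)` OR near the end sets the direction bit (bit 15) of the big-endian SMP status field, marking the MAD as a response travelling back along the directed route. The RDMA headers expose a named constant for this; its shape is roughly (assumed from ib_smi.h, not quoted from this tree):

#define IB_SMP_DIRECTION			cpu_to_be16(0x8000)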
Example #28
/*
 * rebuilds a freespace tree given a cursor and magic number of type
 * of tree to build (bno or bcnt).  returns the number of free blocks
 * represented by the tree.
 */
static xfs_extlen_t
build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_rec_t		*bt_rec;
	int			level;
	xfs_agblock_t		agbno;
	extent_tree_node_t	*ext_ptr;
	bt_stat_level_t		*lptr;
	xfs_extlen_t		freeblks;
	__uint32_t		crc_magic;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
#endif
	level = btree_curs->num_levels;
	freeblks = 0;

	ASSERT(level > 0);
	if (magic == XFS_ABTB_MAGIC)
		crc_magic = XFS_ABTB_CRC_MAGIC;
	else
		crc_magic = XFS_ABTC_CRC_MAGIC;

	/*
	 * initialize the first block on each btree level
	 */
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, i,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, i,
						0, agno, 0);
	}
	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_freespace_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (magic == XFS_ABTB_MAGIC)
		ext_ptr = findfirst_bno_extent(agno);
	else
		ext_ptr = findfirst_bcnt_extent(agno);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
		agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif

	lptr = &btree_curs->level[0];

	for (i = 0; i < btree_curs->level[0].num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, 0,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, 0,
						0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "bft, bb_numrecs = %d\n",
				be16_to_cpu(bt_hdr->bb_numrecs));
#endif

		if (lptr->modulo > 0)
			lptr->modulo--;

		/*
		 * initialize values in the path up to the root if
		 * this is a multi-level btree
		 */
		if (btree_curs->num_levels > 1)
			prop_freespace_cursor(mp, agno, btree_curs,
					ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount,
					0, magic);

		bt_rec = (xfs_alloc_rec_t *)
			  ((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ext_ptr != NULL);
			bt_rec[j].ar_startblock = cpu_to_be32(
							ext_ptr->ex_startblock);
			bt_rec[j].ar_blockcount = cpu_to_be32(
							ext_ptr->ex_blockcount);
			freeblks += ext_ptr->ex_blockcount;
			if (magic == XFS_ABTB_MAGIC)
				ext_ptr = findnext_bno_extent(ext_ptr);
			else
				ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
#if 0
#ifdef XR_BLD_FREE_TRACE
			if (ext_ptr == NULL)
				fprintf(stderr, "null extent pointer, j = %d\n",
					j);
			else
				fprintf(stderr,
				"bft, agno = %d, start = %u, count = %u\n",
					agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
#endif
#endif
		}

		if (ext_ptr != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_FREE_TRACE
				fprintf(stderr, " writing fst agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	return(freeblks);
}
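The `num_recs_pb + (lptr->modulo > 0)` expression, together with the `modulo--` that follows, spreads the leaf records as evenly as possible: the first `modulo` leaves each carry one extra record. A standalone illustration with hypothetical counts:

#include <stdio.h>

int main(void)
{
	int nrecs = 23, nblocks = 4;		/* hypothetical totals */
	int num_recs_pb = nrecs / nblocks;	/* 5 */
	int modulo = nrecs % nblocks;		/* 3 */
	int i;

	/* Leaves get 6, 6, 6 and 5 records respectively (sum = 23). */
	for (i = 0; i < nblocks; i++) {
		printf("leaf %d: %d records\n", i, num_recs_pb + (modulo > 0));
		if (modulo > 0)
			modulo--;
	}
	return 0;
}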
Example #29
	xfrm_dst_destroy(xdst);
}

static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	if (!unregister)
		return;

	xfrm_dst_ifdown(dst, dev);
}

static struct dst_ops xfrm4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		1024,
};
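Note that `.protocol = cpu_to_be16(ETH_P_IP)` is legal in a static initializer: for compile-time constants the macro folds to a constant byte swap, so no code runs at load time. A minimal sketch:

/* Folded at compile time: 0x0800 stored big-endian, no runtime swap. */
static const __be16 demo_proto = cpu_to_be16(ETH_P_IP);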

static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family = 		AF_INET,
	.dst_ops =		&xfrm4_dst_ops,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
Example #30
File: t4vf_hw.c Project: BozkurTR/kernel
/**
 *	t4vf_get_port_stats - collect "port" statistics
 *	@adapter: the adapter
 *	@pidx: the port index
 *	@s: the stats structure to fill
 *
 *	Collect statistics for the "port"'s Virtual Interface.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID(pi->viid) |
					     FW_CMD_REQUEST |
					     FW_CMD_READ);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
				    FW_VI_STATS_CMD_NSTATS(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}
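The mailbox loop pulls at most six 64-bit counters per command. Assuming VI_VF_NUM_STATS is 16 (nine TX plus seven RX counters, matching the fields copied out above), the chunking works out to reads of 6, 6 and 4 statistics; a standalone trace of the index arithmetic:

#include <stdio.h>

#define VI_VF_NUM_STATS	16	/* assumed to match t4vf_port_stats */

int main(void)
{
	unsigned int rem = VI_VF_NUM_STATS;

	/* Prints reads of 6, 6 and 4 stats at indices 0, 6 and 12. */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = rem < 6 ? rem : 6;

		printf("mailbox read: %u stats from index %u\n", nstats, ix);
		rem -= nstats;
	}
	return 0;
}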