Example no. 1
void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc)
{
	/* get available rx/tx chains */
	wlc->stf->hw_txchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_TXCHAIN);
	wlc->stf->hw_rxchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_RXCHAIN);

	/* these parameters are intended to be used for all PHY types */
	if (wlc->stf->hw_txchain == 0 || wlc->stf->hw_txchain == 0xf) {
		if (BRCMS_ISNPHY(wlc->band))
			wlc->stf->hw_txchain = TXCHAIN_DEF_NPHY;
		else
			wlc->stf->hw_txchain = TXCHAIN_DEF;
	}

	wlc->stf->txchain = wlc->stf->hw_txchain;
	wlc->stf->txstreams = (u8) hweight8(wlc->stf->hw_txchain);

	if (wlc->stf->hw_rxchain == 0 || wlc->stf->hw_rxchain == 0xf) {
		if (BRCMS_ISNPHY(wlc->band))
			wlc->stf->hw_rxchain = RXCHAIN_DEF_NPHY;
		else
			wlc->stf->hw_rxchain = RXCHAIN_DEF;
	}

	wlc->stf->rxchain = wlc->stf->hw_rxchain;
	wlc->stf->rxstreams = (u8) hweight8(wlc->stf->hw_rxchain);

	/* initialize the txcore table */
	memcpy(wlc->stf->txcore, txcore_default, sizeof(wlc->stf->txcore));

	/* default spatial_policy */
	wlc->stf->spatial_policy = MIN_SPATIAL_EXPANSION;
	brcms_c_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
}
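
All of these examples revolve around the same primitive: hweight8() returns the number of set bits in a byte (its population count, or Hamming weight). Here it turns a chain bitmask into a stream count. A minimal standalone sketch of that idea, using __builtin_popcount as a stand-in for the kernel's hweight8 and made-up mask values:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's hweight8(): population count of a byte. */
static unsigned int popcount8(uint8_t x)
{
	return (unsigned int)__builtin_popcount(x);
}

int main(void)
{
	/* Hypothetical chain masks: bit i set means RF chain i is available. */
	uint8_t masks[] = { 0x1, 0x3, 0x7 };
	unsigned int i;

	for (i = 0; i < sizeof(masks); i++)
		printf("txchain mask 0x%x -> %u spatial stream(s)\n",
		       masks[i], popcount8(masks[i]));
	return 0;
}
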
Example no. 2
static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
				  u8 core_mask)
{
	BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n",
		 wlc->pub->unit, Nsts, core_mask);

	if (hweight8(core_mask) > wlc->stf->txstreams)
		core_mask = 0;

	if ((hweight8(core_mask) == wlc->stf->txstreams) &&
	    ((core_mask & ~wlc->stf->txchain)
	     || !(core_mask & wlc->stf->txchain)))
		core_mask = wlc->stf->txchain;

	wlc->stf->txcore[Nsts] = core_mask;
	/* Nsts = 1..4, txcore index = 1..4 */
	if (Nsts == 1) {
		/* Need to update the beacon and ucode-generated response
		 * frames when the 1-stream core map changes
		 */
		wlc->stf->phytxant = core_mask << PHY_TXC_ANT_SHIFT;
		brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
		if (wlc->clk) {
			brcms_c_suspend_mac_and_wait(wlc);
			brcms_c_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
			brcms_c_enable_mac(wlc);
		}
	}

	return 0;
}
Example no. 3
static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}
Example no. 4
static irqreturn_t pcips2_interrupt(int irq, void *devid)
{
	struct pcips2_data *ps2if = devid;
	unsigned char status, scancode;
	int handled = 0;

	do {
		unsigned int flag;

		status = inb(ps2if->base + PS2_STATUS);
		if (!(status & PS2_STAT_RXFULL))
			break;
		handled = 1;
		scancode = inb(ps2if->base + PS2_DATA);
		if (status == 0xff && scancode == 0xff)
			break;

		flag = (status & PS2_STAT_PARITY) ? 0 : SERIO_PARITY;

		if (hweight8(scancode) & 1)
			flag ^= SERIO_PARITY;

		serio_interrupt(ps2if->io, scancode, flag);
	} while (1);
	return IRQ_RETVAL(handled);
}
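
The parity handling above relies on the fact that PS/2 frames use odd parity: the eight data bits plus the parity bit must contain an odd number of 1s, and hweight8(scancode) & 1 gives the parity of the data byte alone. A standalone sketch of that check, with __builtin_popcount standing in for hweight8 and sample values chosen for illustration:

#include <stdio.h>
#include <stdint.h>

/* PS/2 frames use odd parity: the 8 data bits plus the parity bit must
 * contain an odd number of 1s.  popcount8 stands in for hweight8(). */
static unsigned int popcount8(uint8_t x)
{
	return (unsigned int)__builtin_popcount(x);
}

static int parity_ok(uint8_t data, unsigned int parity_bit)
{
	return ((popcount8(data) + parity_bit) & 1) == 1;
}

int main(void)
{
	/* 0x55 has four 1 bits, so the parity bit must be 1 to make the
	 * total odd; a parity bit of 0 would be flagged as an error. */
	printf("0x55, parity 1: %s\n", parity_ok(0x55, 1) ? "ok" : "error");
	printf("0x55, parity 0: %s\n", parity_ok(0x55, 0) ? "ok" : "error");
	return 0;
}
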
Example no. 5
static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col)
{
	int row = 0, code, pos;
	struct input_dev *input = keypad->input;
	u32 ske_ris;
	int key_pressed;
	int num_of_rows;

	/* find out the row */
	num_of_rows = hweight8(status);
	do {
		pos = __ffs(status);
		row = pos;
		status &= ~(1 << pos);

		code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
		ske_ris = readl(keypad->reg_base + SKE_RIS);
		key_pressed = ske_ris & SKE_KPRISA;

		input_event(input, EV_MSC, MSC_SCAN, code);
		input_report_key(input, keypad->keymap[code], key_pressed);
		input_sync(input);
		num_of_rows--;
	} while (num_of_rows);
}
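
ske_keypad_report walks the set bits of the status byte: hweight8() tells it how many bits there are and __ffs() extracts each one in turn. The same iterate-over-set-bits idiom in a standalone form, with GCC builtins standing in for the kernel helpers and an arbitrary example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t status = 0x2c;				/* rows 2, 3 and 5 active (example value) */
	int remaining = __builtin_popcount(status);	/* like hweight8() */

	while (remaining--) {
		int row = __builtin_ctz(status);	/* like __ffs(): lowest set bit */

		printf("row %d is active\n", row);
		status &= (uint8_t)~(1u << row);	/* clear it and continue */
	}
	return 0;
}
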
Example no. 6
/*
 * Read all bytes waiting in the PS2 port.  There should be
 * at the most one, but we loop for safety.  If there was a
 * framing error, we have to manually clear the status.
 */
static irqreturn_t ps2_rxint(int irq, void *dev_id)
{
	struct ps2if *ps2if = dev_id;
	unsigned int scancode, flag, status;

	status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
	while (status & PS2STAT_RXF) {
		if (status & PS2STAT_STP)
			sa1111_writel(PS2STAT_STP, ps2if->base + SA1111_PS2STAT);

		flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
		       (status & PS2STAT_RXP ? 0 : SERIO_PARITY);

		scancode = sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff;

		if (hweight8(scancode) & 1)
			flag ^= SERIO_PARITY;

		serio_interrupt(ps2if->io, scancode, flag);

		status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
	}

	return IRQ_HANDLED;
}
Example no. 7
/*
 * Returns true when there is at least one combination of pressed keys that
 * results in ghosting.
 */
static bool cros_ec_keyb_has_ghosting(struct cros_ec_keyb *ckdev, uint8_t *buf)
{
	int col1, col2, buf1, buf2;
	struct device *dev = ckdev->dev;
	uint8_t *valid_keys = ckdev->valid_keys;

	/*
	 * Ghosting happens if for any pressed key X there are other keys
	 * pressed both in the same row and column of X as, for instance,
	 * in the following diagram:
	 *
	 * . . Y . g .
	 * . . . . . .
	 * . . . . . .
	 * . . X . Z .
	 *
	 * In this case only X, Y, and Z are pressed, but g appears to be
	 * pressed too (see Wikipedia).
	 */
	for (col1 = 0; col1 < ckdev->cols; col1++) {
		buf1 = buf[col1] & valid_keys[col1];
		for (col2 = col1 + 1; col2 < ckdev->cols; col2++) {
			buf2 = buf[col2] & valid_keys[col2];
			if (hweight8(buf1 & buf2) > 1) {
				dev_dbg(dev, "ghost found at: B[%02d]:0x%02x & B[%02d]:0x%02x",
					col1, buf1, col2, buf2);
				return true;
			}
		}
	}

	return false;
}
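
The ghosting test boils down to one bit operation: two columns share more than one pressed row exactly when their column bytes have at least two set bits in common, i.e. hweight8(buf1 & buf2) > 1. A self-contained sketch of the same check on a made-up four-column matrix (popcount again stands in for hweight8):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Each byte is one keyboard column; bit i set means the key in row i of
 * that column is pressed.  Ghosting: two columns share >= 2 pressed rows. */
static bool has_ghosting(const uint8_t *cols, int ncols)
{
	for (int c1 = 0; c1 < ncols; c1++)
		for (int c2 = c1 + 1; c2 < ncols; c2++)
			if (__builtin_popcount(cols[c1] & cols[c2]) > 1)
				return true;
	return false;
}

int main(void)
{
	/* Columns 0 and 2 both have rows 0 and 3 pressed -> ghosting. */
	uint8_t ghosted[4] = { 0x09, 0x00, 0x09, 0x00 };
	/* No two columns share more than one pressed row -> fine. */
	uint8_t clean[4]   = { 0x01, 0x02, 0x08, 0x00 };

	printf("ghosted: %d\n", has_ghosting(ghosted, 4));
	printf("clean:   %d\n", has_ghosting(clean, 4));
	return 0;
}
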
Example no. 8
static int read_fcb(struct mtd_info *mtd, int num, struct fcb_block **retfcb)
{
	int i;
	int bitflips = 0, bit_to_flip;
	u8 parity, np, syndrome;
	u8 *fcb, *ecc;
	int ret;
	void *rawpage;

	*retfcb = NULL;

	rawpage = xmalloc(mtd->writesize + mtd->oobsize);

	ret = raw_read_page(mtd, rawpage, mtd->erasesize * num);
	if (ret) {
		pr_err("Cannot read block %d\n", num);
		goto err;
	}

	fcb = rawpage + 12;
	ecc = rawpage + 512 + 12;

	for (i = 0; i < 512; i++) {
		parity = ecc[i];
		np = calculate_parity_13_8(fcb[i]);

		syndrome = np ^ parity;
		if (syndrome == 0)
			continue;

		if (!(hweight8(syndrome) & 1)) {
			pr_err("Uncorrectable error at offset %d\n", i);
			ret = -EIO;
			goto err;
		}

		bit_to_flip = lookup_single_error_13_8(syndrome);
		if (bit_to_flip < 0) {
			pr_err("Uncorrectable error at offset %d\n", i);
			ret = -EIO;
			goto err;
		}

		bitflips++;

		if (bit_to_flip > 7)
			ecc[i] ^= 1 << (bit_to_flip - 8);
		else
			fcb[i] ^= 1 << bit_to_flip;
	}

	*retfcb = xmemdup(rawpage + 12, 512);

	ret = 0;
err:
	free(rawpage);

	return ret;
}
Example no. 9
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then the current frame will be sent out of order,
		 * since buffers with a lower id have higher priority (hell..)
		 */
		netif_stop_queue(dev);
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
Example no. 10
int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
{
	u8 *blk_bits = yaffs_block_bits(dev, blk);
	int i;
	int n = 0;

	for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
		n += hweight8(*blk_bits);

	return n;
}
Example no. 11
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;
	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;
	return true;
}
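
The check relies on the HDCP rule that a key selection vector is 40 bits containing exactly twenty 1s, so summing hweight8 over the five bytes and comparing against 20 is sufficient. A standalone version of the same test; the sample KSV values below are invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define KSV_LEN 5	/* 40-bit key selection vector */

/* Valid HDCP KSVs contain exactly 20 set bits out of 40. */
static bool ksv_valid(const uint8_t ksv[KSV_LEN])
{
	int ones = 0;

	for (int i = 0; i < KSV_LEN; i++)
		ones += __builtin_popcount(ksv[i]);
	return ones == 20;
}

int main(void)
{
	uint8_t good[KSV_LEN] = { 0xff, 0xff, 0x0f, 0x00, 0x00 }; /* 20 ones */
	uint8_t bad[KSV_LEN]  = { 0xff, 0x00, 0x00, 0x00, 0x00 }; /*  8 ones */

	printf("good: %d, bad: %d\n", ksv_valid(good), ksv_valid(bad));
	return 0;
}
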
Example no. 12
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
{
	unsigned long phy_ctxt_counter = 0;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_binding_iterator,
						   &phy_ctxt_counter);

	return hweight8(phy_ctxt_counter);
}
Example no. 13
static int init_super_from_raw(struct gfs_super_info *g, struct gfs_super *s)
{
	int i, map_size;
	u8 map = 0;
	mutex_init(&g->s_mutex);
	mutex_lock(&g->s_mutex);
	g->s_nfree_chunks = 0;

#define S(n) g->n = le32_to_cpu(s->n)
#define _S(n) ({ const typeof(S(n)) __m = S(n); PDEBUG("%s=%ld\n",#n,(long)__m);  __m;})
	S(s_nchunks);
	S(s_ino_root);
	g->s_chunk_size = s->s_chunk_size;
	map_size = __count_map_size(g->s_nchunks);

	g->s_inode_map = kzalloc(map_size, GFP_KERNEL);
	if (!g->s_inode_map) {
		mutex_unlock(&g->s_mutex);
		return -ENOMEM;
	}

	for (i = 0; i < map_size; i++) {
		map = g->s_inode_map[i] = s->s_inode_map[i];
		g->s_nfree_chunks += hweight8(~map);
	}
	g->s_nfree_chunks -= hweight8(~map);
	g->s_nfree_chunks += hweight8((~map) << (8 - g->s_nchunks % 8)); /* ?? Buggy ?? */

	g->s_chunk_size = PAGE_SIZE / 512;

#undef S
#undef _S

	mutex_unlock(&g->s_mutex);
	return 0;
}
Example no. 14
static ssize_t occ_sysfs_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	int rc;
	int val = 0;
	struct occ *occ = dev_get_drvdata(dev);
	struct occ_poll_response_header *header;
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);

	rc = occ_update_response(occ);
	if (rc)
		return rc;

	header = (struct occ_poll_response_header *)occ->resp.data;

	switch (sattr->index) {
	case 0:
		val = !!(header->status & OCC_STAT_MASTER);
		break;
	case 1:
		val = !!(header->status & OCC_STAT_ACTIVE);
		break;
	case 2:
		val = !!(header->status & OCC_EXT_STAT_DVFS_OT);
		break;
	case 3:
		val = !!(header->status & OCC_EXT_STAT_DVFS_POWER);
		break;
	case 4:
		val = !!(header->status & OCC_EXT_STAT_MEM_THROTTLE);
		break;
	case 5:
		val = !!(header->status & OCC_EXT_STAT_QUICK_DROP);
		break;
	case 6:
		val = header->occ_state;
		break;
	case 7:
		if (header->status & OCC_STAT_MASTER)
			val = hweight8(header->occs_present);
		else
			val = 1;
		break;
	case 8:
		val = occ->error;
		break;
	default:
		return -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE - 1, "%d\n", val);
}
Example no. 15
int Check_D_FailBlock(BYTE *redundant)
{
	redundant += REDT_BLOCK;

	if (*redundant == 0xFF)
		return SMSUCCESS;
	if (!*redundant)
		return ERROR;
	if (hweight8(*redundant) < 7)
		return ERROR;

	return SMSUCCESS;
}
Example no. 16
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
	int i;
	u8 *buffer;
	u32 count = 0;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	buffer = la->la_bitmap;
	for (i = 0; i < le16_to_cpu(la->la_size); i++)
		count += hweight8(buffer[i]);

	trace_ocfs2_local_alloc_count_bits(count);
	return count;
}
Example no. 17
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_dev *dev = hw->priv;
	int n_chains = hweight8(dev->antenna_mask);

	*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);

	/* convert from per-chain power to combined
	 * output on 2x2 devices
	 */
	if (n_chains > 1)
		*dbm += 3;

	return 0;
}
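
mt76 evidently keeps txpower_cur in half-dB steps (hence the DIV_ROUND_UP by 2), and with two transmit chains the combined output is about 3 dB above the per-chain value, since radiated power doubles. A small arithmetic sketch under those assumptions, with a hypothetical per-chain value:

#include <stdio.h>

/* Round-up integer division, like the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int txpower_half_db = 35;	/* per-chain power in 0.5 dB units (hypothetical) */
	int n_chains = 2;
	int dbm = DIV_ROUND_UP(txpower_half_db, 2);	/* 18 dBm per chain */

	/* Two chains transmitting at the same power double the total output,
	 * which is a 3 dB increase over a single chain. */
	if (n_chains > 1)
		dbm += 3;

	printf("combined tx power: %d dBm\n", dbm);	/* 21 dBm */
	return 0;
}
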
Example no. 18
static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
{
    struct thread_data *td = vc->td;
    struct io_u *io_u = vc->io_u;
    char *buf, *pattern;
    unsigned int header_size = __hdr_size(td->o.verify);
    unsigned int len, mod, i, pattern_size;
    int rc;

    pattern = td->o.verify_pattern;
    pattern_size = td->o.verify_pattern_bytes;
    assert(pattern_size != 0);

    (void)paste_format_inplace(pattern, pattern_size,
                               td->o.verify_fmt, td->o.verify_fmt_sz, io_u);

    buf = (void *) hdr + header_size;
    len = get_hdr_inc(td, io_u) - header_size;
    mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;

    rc = cmp_pattern(pattern, pattern_size, mod, buf, len);
    if (!rc)
        return 0;

    /* Slow path, compare each byte */
    for (i = 0; i < len; i++) {
        if (buf[i] != pattern[mod]) {
            unsigned int bits;

            bits = hweight8(buf[i] ^ pattern[mod]);
            log_err("fio: got pattern '%02x', wanted '%02x'. Bad bits %d\n",
                    (unsigned char)buf[i],
                    (unsigned char)pattern[mod],
                    bits);
            log_err("fio: bad pattern block offset %u\n", i);
            dump_verify_buffers(hdr, vc);
            return EILSEQ;
        }
        mod++;
        if (mod == td->o.verify_pattern_bytes)
            mod = 0;
    }

    /* Unreachable line */
    assert(0);
    return EILSEQ;
}
Example no. 19
int Check_D_DataStatus(BYTE *redundant)
{
	redundant += REDT_DATA;

	if (*redundant == 0xFF)
		return SMSUCCESS;
	if (!*redundant) {
		ErrXDCode = ERR_DataStatus;
		return ERROR;
	} else
		ErrXDCode = NO_ERROR;

	if (hweight8(*redundant) < 5)
		return ERROR;

	return SMSUCCESS;
}
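
Check_D_FailBlock and Check_D_DataStatus treat a redundant-area marker byte as still good if only a few bits have dropped from 0xFF: requiring a minimum hweight8 tolerates isolated bitflips in NAND while still recognising a deliberately programmed (mostly zero) marker. A standalone sketch of that majority-vote idea, using the Check_D_FailBlock threshold and made-up test bytes:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* A marker byte written as 0xFF is accepted if at most one bit has
 * flipped to 0, i.e. it still has at least 7 set bits. */
static bool marker_still_good(uint8_t byte)
{
	return __builtin_popcount(byte) >= 7;
}

int main(void)
{
	printf("0xff -> %d\n", marker_still_good(0xff));	/* good */
	printf("0xfb -> %d\n", marker_still_good(0xfb));	/* one flip, still good */
	printf("0xa5 -> %d\n", marker_still_good(0xa5));	/* programmed marker */
	return 0;
}
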
Example no. 20
/*
 * Display the address, offset and data bytes at comparison failure.
 * Return number of bitflips encountered.
 */
static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res;
	size_t i = 0;
	size_t bitflips = 0;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
		res = *su1 ^ *su2;
		if (res) {
			pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
				(unsigned long)addr, i, *su1, *su2, res);
			bitflips += hweight8(res);
		}
	}

	return bitflips;
}
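
Counting bitflips is XOR plus a per-byte population count: the XOR of two bytes has a 1 exactly where they differ, so hweight8(res) is the number of flipped bits in that byte. A tiny standalone version of the counting loop, with invented data and __builtin_popcount in place of hweight8:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Number of differing bits between two equal-length buffers. */
static size_t count_bitflips(const uint8_t *a, const uint8_t *b, size_t len)
{
	size_t flips = 0;

	for (size_t i = 0; i < len; i++)
		flips += (size_t)__builtin_popcount(a[i] ^ b[i]);
	return flips;
}

int main(void)
{
	uint8_t written[4]  = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t readback[4] = { 0xde, 0xa9, 0xbe, 0xee }; /* two bits flipped */

	printf("bitflips: %zu\n", count_bitflips(written, readback, 4));
	return 0;
}
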
Example no. 21
static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
{
	struct thread_data *td = vc->td;
	struct io_u *io_u = vc->io_u;
	char *buf, *pattern;
	unsigned int header_size = __hdr_size(td->o.verify);
	unsigned int len, mod, i, size, pattern_size;

	pattern = td->o.verify_pattern;
	pattern_size = td->o.verify_pattern_bytes;
	if (pattern_size <= 1)
		pattern_size = MAX_PATTERN_SIZE;
	buf = (void *) hdr + header_size;
	len = get_hdr_inc(td, io_u) - header_size;
	mod = header_size % pattern_size;

	for (i = 0; i < len; i += size) {
		size = pattern_size - mod;
		if (size > (len - i))
			size = len - i;
		if (memcmp(buf + i, pattern + mod, size))
			/* Let the slow compare find the first mismatch byte. */
			break;
		mod = 0;
	}

	for (; i < len; i++) {
		if (buf[i] != pattern[mod]) {
			unsigned int bits;

			bits = hweight8(buf[i] ^ pattern[mod]);
			log_err("fio: got pattern %x, wanted %x. Bad bits %d\n",
				buf[i], pattern[mod], bits);
			log_err("fio: bad pattern block offset %u\n", i);
			dump_verify_buffers(hdr, vc);
			return EILSEQ;
		}
		mod++;
		if (mod == td->o.verify_pattern_bytes)
			mod = 0;
	}

	return 0;
}
Example no. 22
/*
 * Compare with 0xff and show the address, offset and data bytes at
 * comparison failure. Return number of bitflips encountered.
 */
static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
			size_t count)
{
	const unsigned char *su1;
	int res;
	size_t i = 0;
	size_t bitflips = 0;

	for (su1 = cs; 0 < count; ++su1, count--, i++) {
		res = *su1 ^ 0xff;
		if (res) {
			pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
				(unsigned long)addr, (unsigned long)offset + i,
				*su1, res);
			bitflips += hweight8(res);
		}
	}

	return bitflips;
}
Example no. 23
int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
{
	u8 txchain = (u8) int_val;
	u8 txstreams;
	uint i;

	if (wlc->stf->txchain == txchain)
		return 0;

	if ((txchain & ~wlc->stf->hw_txchain)
	    || !(txchain & wlc->stf->hw_txchain))
		return -EINVAL;

	/*
	 * if nrate override is configured to be non-SISO STF mode, reject
	 * reducing txchain to 1
	 */
	txstreams = (u8) hweight8(txchain);
	if (txstreams > MAX_STREAMS_SUPPORTED)
		return -EINVAL;

	wlc->stf->txchain = txchain;
	wlc->stf->txstreams = txstreams;
	brcms_c_stf_stbc_tx_set(wlc, wlc->band->band_stf_stbc_tx);
	brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
	brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
	wlc->stf->txant =
	    (wlc->stf->txstreams == 1) ? ANT_TX_FORCE_0 : ANT_TX_DEF;
	_brcms_c_stf_phy_txant_upd(wlc);

	wlc_phy_stf_chain_set(wlc->band->pi, wlc->stf->txchain,
			      wlc->stf->rxchain);

	for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++)
		brcms_c_stf_txcore_set(wlc, (u8) i, txcore_default[i]);

	return 0;
}
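
The function accepts a new chain mask only if it is a non-empty subset of the hardware chain mask and does not imply more spatial streams than supported, with hweight8(txchain) providing the stream count. A standalone sketch of that validation; the stream limit below is a made-up stand-in for MAX_STREAMS_SUPPORTED:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MAX_STREAMS 4	/* hypothetical limit, like MAX_STREAMS_SUPPORTED */

static bool txchain_valid(uint8_t txchain, uint8_t hw_txchain)
{
	/* Must not request chains the hardware lacks, must request at
	 * least one chain, and must not exceed the stream limit. */
	if ((txchain & ~hw_txchain) || !(txchain & hw_txchain))
		return false;
	return __builtin_popcount(txchain) <= MAX_STREAMS;
}

int main(void)
{
	uint8_t hw = 0x7;	/* chains 0..2 present */

	printf("0x3 valid: %d\n", txchain_valid(0x3, hw));	/* yes */
	printf("0x8 valid: %d\n", txchain_valid(0x8, hw));	/* chain 3 missing */
	printf("0x0 valid: %d\n", txchain_valid(0x0, hw));	/* empty mask */
	return 0;
}
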
Example no. 24
static void mt76_init_stream_cap(struct mt76_dev *dev,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(dev->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
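
The VHT MCS map packs one 2-bit field per possible spatial stream: streams below nstream get IEEE80211_VHT_MCS_SUPPORT_0_9 and the rest IEEE80211_VHT_MCS_NOT_SUPPORTED. A worked standalone sketch of the packing, using the field values defined in the kernel's ieee80211.h (0_9 = 2, NOT_SUPPORTED = 3), for nstream = 2:

#include <stdio.h>
#include <stdint.h>

/* Values from enum ieee80211_vht_mcs_support in the kernel's ieee80211.h. */
#define VHT_MCS_SUPPORT_0_9	2
#define VHT_MCS_NOT_SUPPORTED	3

int main(void)
{
	int nstream = 2;
	uint16_t mcs_map = 0;

	/* One 2-bit field per possible spatial stream (8 in total). */
	for (int i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (uint16_t)(VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |= (uint16_t)(VHT_MCS_NOT_SUPPORTED << (i * 2));
	}

	printf("mcs_map for %d streams: 0x%04x\n", nstream, mcs_map); /* 0xfffa */
	return 0;
}
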
Example no. 25
static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	unsigned long phy_ctxt_counter = 0;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_binding_iterator,
						   &phy_ctxt_counter);

	if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
		    ETH_ALEN))
		return false;

	if (vif->p2p &&
	    !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
		return false;
	/*
	 * Avoid using uAPSD if P2P client is associated to GO that uses
	 * opportunistic power save. This is due to current FW limitation.
	 */
	if (vif->p2p &&
	    (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
	    IEEE80211_P2P_OPPPS_ENABLE_BIT))
		return false;

	/*
	 * Avoid using uAPSD if client is in DCM -
	 * low latency issue in Miracast
	 */
	if (hweight8(phy_ctxt_counter) >= 2)
		return false;

	return true;
}
Example no. 26
static int show_cpuinfo(struct seq_file *m, void *v)
{
    struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
    unsigned long n = (unsigned long) v - 1;
    unsigned int version = cpu_data[n].processor_id;
    unsigned int fp_vers = cpu_data[n].fpu_id;
    char fmt [64];
    int i;

#ifdef CONFIG_SMP
    if (!cpu_online(n))
        return 0;
#endif

    /*
     * For the first processor also print the system type
     */
    if (n == 0) {
        seq_printf(m, "system type\t\t: %s\n", get_system_type());
        if (mips_get_machine_name())
            seq_printf(m, "machine\t\t\t: %s\n",
                       mips_get_machine_name());
    }

    seq_printf(m, "processor\t\t: %ld\n", n);
    sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
            cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
    seq_printf(m, fmt, __cpu_name[n],
               (version >> 4) & 0x0f, version & 0x0f,
               (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
    seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
               cpu_data[n].udelay_val / (500000/HZ),
               (cpu_data[n].udelay_val / (5000/HZ)) % 100);
    seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
    seq_printf(m, "microsecond timers\t: %s\n",
               cpu_has_counter ? "yes" : "no");
    seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
    seq_printf(m, "extra interrupt vector\t: %s\n",
               cpu_has_divec ? "yes" : "no");
    seq_printf(m, "hardware watchpoint\t: %s",
               cpu_has_watch ? "yes, " : "no\n");
    if (cpu_has_watch) {
        seq_printf(m, "count: %d, address/irw mask: [",
                   cpu_data[n].watch_reg_count);
        for (i = 0; i < cpu_data[n].watch_reg_count; i++)
            seq_printf(m, "%s0x%04x", i ? ", " : "" ,
                       cpu_data[n].watch_reg_masks[i]);
        seq_printf(m, "]\n");
    }
    seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
               cpu_has_mips16 ? " mips16" : "",
               cpu_has_mdmx ? " mdmx" : "",
               cpu_has_mips3d ? " mips3d" : "",
               cpu_has_smartmips ? " smartmips" : "",
               cpu_has_dsp ? " dsp" : "",
               cpu_has_mipsmt ? " mt" : ""
              );
    seq_printf(m, "shadow register sets\t: %d\n",
               cpu_data[n].srsets);
    seq_printf(m, "kscratch registers\t: %d\n",
               hweight8(cpu_data[n].kscratch_mask));
    seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);

#if defined(CONFIG_MIPS_MT_SMP)
    if (cpu_has_mipsmt)
        seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
#endif

    sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
            cpu_has_vce ? "%u" : "not available");
    seq_printf(m, fmt, 'D', vced_count);
    seq_printf(m, fmt, 'I', vcei_count);

    proc_cpuinfo_notifier_args.m = m;
    proc_cpuinfo_notifier_args.n = n;

    raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
                            &proc_cpuinfo_notifier_args);

    seq_printf(m, "\n");

    return 0;
}
Example no. 27
/* Read a chunk (page) from NAND.
 *
 * Caller expects ExtendedTags data to be usable even on error; that is,
 * all members except ecc_result and block_bad are zeroed.
 *
 *  - Check ECC results for data (if applicable)
 *  - Check for blank/erased block (return empty ExtendedTags if blank)
 *  - Check the packed_tags1 mini-ECC (correct if necessary/possible)
 *  - Convert packed_tags1 to ExtendedTags
 *  - Update ecc_result and block_bad members to reflect state.
 *
 * Returns YAFFS_OK or YAFFS_FAIL.
 */
int nandmtd1_read_chunk_tags(struct yaffs_dev *dev,
			     int nand_chunk, u8 *data,
			     struct yaffs_ext_tags *etags)
{
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
	int chunk_bytes = dev->data_bytes_per_chunk;
	loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
	int eccres = YAFFS_ECC_RESULT_NO_ERROR;
	struct mtd_oob_ops ops;
	struct yaffs_packed_tags1 pt1;
	int retval;
	int deleted;

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OOB_AUTO;
	ops.len = (data) ? chunk_bytes : 0;
	ops.ooblen = YTAG1_SIZE;
	ops.datbuf = data;
	ops.oobbuf = (u8 *) &pt1;

#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
	 */
	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
#endif
	/* Read page and oob using MTD.
	 * Check status and determine ECC result.
	 */
	retval = mtd->read_oob(mtd, addr, &ops);
	if (retval)
		yaffs_trace(YAFFS_TRACE_MTD,
			"read_oob failed, chunk %d, mtd error %d",
			nand_chunk, retval);

	switch (retval) {
	case 0:
		/* no error */
		break;

	case -EUCLEAN:
		/* MTD's ECC fixed the data */
		eccres = YAFFS_ECC_RESULT_FIXED;
		dev->n_ecc_fixed++;
		break;

	case -EBADMSG:
		/* MTD's ECC could not fix the data */
		dev->n_ecc_unfixed++;
		/* fall into... */
	default:
		rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
		etags->block_bad = (mtd->block_isbad) (mtd, addr);
		return YAFFS_FAIL;
	}

	/* Check for a blank/erased chunk.
	 */
	if (yaffs_check_ff((u8 *) &pt1, 8)) {
		/* when blank, upper layers want ecc_result to be <= NO_ERROR */
		return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
	}
#ifndef CONFIG_YAFFS_9BYTE_TAGS
	/* Read deleted status (bit) then return it to its non-deleted
	 * state before performing tags mini-ECC check. pt1.deleted is
	 * inverted.
	 */
	deleted = !pt1.deleted;
	pt1.deleted = 1;
#else
	deleted = (hweight8(((u8 *) &pt1)[8]) < 7);
#endif

	/* Check the packed tags mini-ECC and correct if necessary/possible.
	 */
	retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
	switch (retval) {
	case 0:
		/* no tags error, use MTD result */
		break;
	case 1:
		/* recovered tags-ECC error */
		dev->n_tags_ecc_fixed++;
		if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
			eccres = YAFFS_ECC_RESULT_FIXED;
		break;
	default:
		/* unrecovered tags-ECC error */
		dev->n_tags_ecc_unfixed++;
		return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
	}

	/* Unpack the tags to extended form and set ECC result.
	 * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
	 */
	pt1.should_be_ff = 0xFFFFFFFF;
	yaffs_unpack_tags1(etags, &pt1);
	etags->ecc_result = eccres;

	/* Set deleted state */
	etags->is_deleted = deleted;
	return YAFFS_OK;
}
Example no. 28
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
Example no. 29
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;
	unsigned int version = cpu_data[n].processor_id;
	unsigned int fp_vers = cpu_data[n].fpu_id;
	char fmt [64];
	int i;

	struct thread_struct *mc_thread = &current->thread;

#ifdef CONFIG_SMP
	if (!cpu_isset(n, cpu_online_map))
		return 0;
#endif

	/* For Magiccode, when the current process uses emulated ARM native code: */
	if (mc_thread->mcflags != CPU_MIPS) {
		/* mimic cpuinfo of 2012 Nexus7 with NVidia Tegra3 T30L cpu */
		seq_printf(m, "Processor\t: ARMv7 Processor rev 9 (v7l)\n");
		seq_printf(m, "processor\t: 0\n");
		seq_printf(m, "BogoMIPS\t: 1001.88\n\n");
		/* show just one processor core */
		seq_printf(m, "Features\t: swp half thumb fastmult vfp ");
		if (mc_thread->mcflags == CPU_ARM_NEON)
			seq_printf(m, "edsp neon ");
		seq_printf(m, "vfpv3\n");
		/* no thumbee or tls feature */
		seq_printf(m, "CPU implementer\t: 0x41\n");  /* ARM */
		seq_printf(m, "CPU architecture: 7\n");
		seq_printf(m, "CPU variant\t: 0x2\n");
		seq_printf(m, "CPU part\t: 0xc09\n");  /* Cortex-A9 */
		seq_printf(m, "CPU revision\t: 9\n");
		seq_printf(m, "\n");
		return 0;
	} else {
		/*
		 * For the first processor also print the system type
		 */
		unsigned int detected = 0;
		rcu_read_lock();
		if ((strcmp(current->parent->comm, "tv.apad:vplayer") == 0) ||
			(strcmp(current->comm, "tv.apad:vplayer") == 0)) {
			detected = 1;
		}
		rcu_read_unlock();
		if (n == 0) {
			seq_printf(m, "system type\t\t: %s\n", get_system_type());
			if (mips_get_machine_name())
				seq_printf(m, "machine\t\t\t: %s\n",
					   mips_get_machine_name());
		}
		if (detected == 0) {
			seq_printf(m, "processor\t\t: %ld\n", n);
		} else {
			seq_printf(m, "processor\t\t: ARMv7 swp half thumb fastmult vfp edsp neon vfpv3 %ld\n ", n);
		}
		sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
			cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
		seq_printf(m, fmt, __cpu_name[n],
			   (version >> 4) & 0x0f, version & 0x0f,
			   (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
		seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
			   cpu_data[n].udelay_val / (500000/HZ),
			   (cpu_data[n].udelay_val / (5000/HZ)) % 100);
		seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
		if (detected == 0) {
			seq_printf(m, "microsecond timers\t: %s\n",
						cpu_has_counter ? "yes" : "no");
		}
		seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
		seq_printf(m, "extra interrupt vector\t: %s\n",
			   cpu_has_divec ? "yes" : "no");
		seq_printf(m, "hardware watchpoint\t: %s",
			   cpu_has_watch ? "yes, " : "no\n");
		if (cpu_has_watch) {
			seq_printf(m, "count: %d, address/irw mask: [",
				   cpu_data[n].watch_reg_count);
			for (i = 0; i < cpu_data[n].watch_reg_count; i++)
				seq_printf(m, "%s0x%04x", i ? ", " : "" ,
					   cpu_data[n].watch_reg_masks[i]);
			seq_printf(m, "]\n");
		}
		seq_printf(m, "microMIPS\t\t: %s\n", cpu_has_mmips ? "yes" : "no");
		seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s%s\n",
			   cpu_has_mips16 ? " mips16" : "",
			   cpu_has_mdmx ? " mdmx" : "",
			   cpu_has_mips3d ? " mips3d" : "",
			   cpu_has_smartmips ? " smartmips" : "",
			   cpu_has_dsp ? " dsp" : "",
			   cpu_has_mipsmt ? " mt" : "",
			   cpu_has_mxu ? " mxu" : ""
			);
		seq_printf(m, "shadow register sets\t: %d\n",
			   cpu_data[n].srsets);
		seq_printf(m, "kscratch registers\t: %d\n",
			   hweight8(cpu_data[n].kscratch_mask));
		seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
		
		sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
			cpu_has_vce ? "%u" : "not available");
		seq_printf(m, fmt, 'D', vced_count);
		seq_printf(m, fmt, 'I', vcei_count);
		
		/* Android requires 'Hardware' to setup the init.%hardware%.rc */
		seq_printf(m, "Hardware\t\t: %s\n", get_board_type());
		
		seq_printf(m, "\n");
	}
	return 0;
}