Example #1
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->common->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

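	/*
	 * If none of the CPUs in the affinity mask is still online, fall
	 * back to the full online mask and report the affinity as broken.
	 */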
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		int r = irq_do_set_affinity(d, affinity, false);
		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
					    d->irq, r);
	}

	return ret;
}
Example #2
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
	__be32 *p;
	__be32 status;

	status = decode_string(xdr, &hdr->taglen, &hdr->tag);
	if (unlikely(status != 0))
		return status;
	/* We do not like overly long tags! */
	if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) {
		printk("NFS: NFSv4 CALLBACK %s: client sent tag of length %u\n",
				__func__, hdr->taglen);
		return htonl(NFS4ERR_RESOURCE);
	}
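	/* Read 12 bytes: minor version, callback ident and op count. */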
	p = read_buf(xdr, 12);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	hdr->minorversion = ntohl(*p++);
	/* Check minor version is zero or one. */
	if (hdr->minorversion <= 1) {
		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
	} else {
		pr_warn_ratelimited("NFS: %s: NFSv4 server callback with "
			"illegal minor version %u!\n",
			__func__, hdr->minorversion);
		return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
	}
	hdr->nops = ntohl(*p);
	dprintk("%s: minorversion %d nops %d\n", __func__,
		hdr->minorversion, hdr->nops);
	return 0;
}
Example #3
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
				 const char **cipher_str_ret, int *keysize_ret)
{
	u32 mode;

	if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
		pr_warn_ratelimited("fscrypt: inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)\n",
				    inode->i_ino,
				    ci->ci_data_mode, ci->ci_filename_mode);
		return -EINVAL;
	}

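	/*
	 * Regular files are encrypted with the contents mode; directories
	 * and symlinks only have their names encrypted, so they use the
	 * filenames mode.
	 */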
	if (S_ISREG(inode->i_mode)) {
		mode = ci->ci_data_mode;
	} else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
		mode = ci->ci_filename_mode;
	} else {
		WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
			  inode->i_ino, (inode->i_mode & S_IFMT));
		return -EINVAL;
	}

	*cipher_str_ret = available_modes[mode].cipher_str;
	*keysize_ret = available_modes[mode].keysize;
	return 0;
}
Example #4
static void qpnpint_irq_unmask(struct irq_data *d)
{
	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct q_perip_data *per_d = irq_d->per_d;
	int rc;

	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

	if (!chip_d->cb) {
		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
				chip_d->bus_nr, irq_d->spmi_slave,
				irq_d->spmi_offset);
		return;
	}

	qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);

	per_d->int_en |= irq_d->mask_shift;
	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
					&irq_d->mask_shift, 1);
	if (rc) {
		pr_err("spmi failure on irq %d\n", d->irq);
		return;
	}
}
Example #5
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
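	/* sized to hold the largest extension payload we understand */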
	union {
		struct rds_ext_header_version version;
		__be16 rds_npaths;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       be16_to_cpu(buffer.rds_npaths));
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type "
					     "0x%x\n", type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
}
Example #6
static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
{
	struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);

	/* init hash transform on demand */
	if (unlikely(!tfm)) {
		struct crypto_shash *prev_tfm;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm)) {
			pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
					    PTR_ERR(tfm));
			return PTR_ERR(tfm);
		}
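		/*
		 * Publish the new transform unless another task beat us to
		 * it; on a lost race, free ours and reuse the winner's.
		 */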
		prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
		if (prev_tfm) {
			crypto_free_shash(tfm);
			tfm = prev_tfm;
		}
	}

	{
		SHASH_DESC_ON_STACK(desc, tfm);
		desc->tfm = tfm;
		desc->flags = 0;

		return crypto_shash_digest(desc, key, keysize, salt);
	}
}
Example #7
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
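
For context, the tracing side hands this proto out when a program asks for
the helper. A minimal sketch of such a dispatch, modeled on the switch in
kernel/trace/bpf_trace.c (the exact function and its signature vary by
kernel version):

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_probe_write_user:
		/* logs the pr_warn_ratelimited() notice above */
		return bpf_get_probe_write_proto();
	default:
		return NULL;
	}
}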
Example #8
static
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
			 struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int status;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));

	status = pci_enable_msi(dev);

	if (status) {
		pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
				    pci_name(dev), pdev->xdev->otherend_id,
				    status);
		op->value = 0;
		return XEN_PCI_ERR_op_failed;
	}

	/* The value the guest needs is actually the IDT vector, not the
	 * local domain's IRQ number. */

	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
			op->value);

	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return 0;
}
Example #9
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #10
static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
					be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
					be32_to_cpu(hp_elog->_drc_u.drc_index);
	}
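	/* Other id types (e.g. a DRC name string) need no byte swapping. */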

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}
Example #11
/**
 * omap_vp_disable() - API to disable a particular VP
 * @voltdm:	pointer to the VDD whose VP is to be disabled.
 *
 * This API disables a particular voltage processor. Needed by the smartreflex
 * class drivers.
 */
void omap_vp_disable(struct voltagedomain *voltdm)
{
	struct omap_vp_instance *vp;
	u32 vpconfig;

	if (IS_ERR_OR_NULL(voltdm)) {
		pr_err("%s: VDD specified does not exist!\n", __func__);
		return;
	}

	vp = voltdm->vp;
	if (IS_ERR_OR_NULL(vp)) {
		pr_err("%s: No VP info for vdd_%s\n", __func__, voltdm->name);
		return;
	}

	if (!voltdm->read || !voltdm->write) {
		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
			__func__, voltdm->name);
		return;
	}

	/* If the VP is already disabled, do nothing and return. */
	if (!vp->enabled) {
		pr_warning("%s: Trying to disable VP for vdd_%s when "
			"it is already disabled\n", __func__, voltdm->name);
		return;
	}

	if (_vp_wait_for_idle(voltdm, vp)) {
		pr_warn_ratelimited("%s: vdd_%s timedout!Ignore and try\n",
				    __func__, voltdm->name);
	}
	/* Disable VP */
	vpconfig = voltdm->read(vp->vpconfig);
	vpconfig &= ~vp->common->vpconfig_vpenable;
	voltdm->write(vpconfig, vp->vpconfig);

	if (_vp_wait_for_idle(voltdm, vp)) {
		pr_warn_ratelimited("%s: vdd_%s timedout after disable!!\n",
				    __func__, voltdm->name);
	}

	vp->enabled = false;

	return;
}
Example #12
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags);
	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
	return 0;
}
Example #13
static int ovl_check_fd(const void *data, struct file *f, unsigned int fd)
{
	const struct dentry *dentry = data;

	if (file_inode(f) == d_inode(dentry))
		pr_warn_ratelimited("overlayfs: Warning: Copying up %pD, but open R/O on fd %u which will cease to be coherent [pid=%d %s]\n",
				    f, fd, current->pid, current->comm);
	return 0;
}
Example #14
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

#ifdef CONFIG_HISI_RDR
#ifdef CONFIG_HISI_RDR_SWITCH
	unsigned int old_int_num = curr_int_num;

	curr_int_num = irq;

	if (NULL != int_switch_hook) {/*exc int hook func*/
		int_switch_hook(0, old_int_num, curr_int_num);
		int_switch_flag = 1;
	}
#endif
#else
	unsigned int old_int_num = curr_int_num;

	curr_int_num = irq;

	if (NULL != int_switch_hook) {/*exc int hook func*/
		int_switch_hook(0, old_int_num, curr_int_num);
		int_switch_flag = 1;
	}
#endif


	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		pr_warn_ratelimited("Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();

#ifdef CONFIG_HISI_RDR
#ifdef CONFIG_HISI_RDR_SWITCH
	/*call exception interrupt hook func*/
	if ((NULL != int_switch_hook) && (0 != int_switch_flag))
		int_switch_hook(1, old_int_num, curr_int_num);
#endif
#else
	/*call exception interrupt hook func*/
	if ((NULL != int_switch_hook) && (0 != int_switch_flag))
		int_switch_hook(1, old_int_num, curr_int_num);
#endif

	set_irq_regs(old_regs);
}
Example #15
static void rsp_cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
			int congested)
{
	caam_congested = congested;

	if (congested)
		pr_warn_ratelimited("CAAM rsp path congested\n");
	else
		pr_info_ratelimited("CAAM rsp path congestion state exit\n");
}
Example #16
static void qpnpint_irq_unmask(struct irq_data *d)
{
	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct q_perip_data *per_d = irq_d->per_d;
	int rc;
	uint8_t buf[2];
	uint8_t prev_int_en;

	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

	if (!chip_d->cb) {
		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
				chip_d->bus_nr, irq_d->spmi_slave,
				irq_d->spmi_offset);
		return;
	}

	spin_lock(&per_d->lock);
	prev_int_en = per_d->int_en;
	per_d->int_en |= irq_d->mask_shift;
	if (!prev_int_en && per_d->int_en) {
		/*
		 * no interrupt prior to this call was enabled for the
		 * peripheral. Ask the arbiter to enable interrupts for
		 * this peripheral
		 */
		qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
	}
	spin_unlock(&per_d->lock);

	/* Check the current state of the interrupt enable bit. */
	rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_EN_SET, buf, 1);
	if (rc) {
		pr_err("SPMI read failure for IRQ %d, rc=%d\n", d->irq, rc);
		return;
	}

	if (!(buf[0] & irq_d->mask_shift)) {
		/*
		 * Since the interrupt is currently disabled, write to both the
		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
		 * cannot be triggered when the interrupt is enabled.
		 */
		buf[0] = irq_d->mask_shift;
		buf[1] = irq_d->mask_shift;
		rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR, buf, 2);
		if (rc) {
			pr_err("SPMI write failure for IRQ %d, rc=%d\n", d->irq,
				rc);
			return;
		}
	}
}
Example #17
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
			  struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
		       pci_name(dev));
	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);

	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector) {
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
				if (unlikely(verbose_request))
					printk(KERN_DEBUG DRV_NAME ": %s: "
						"MSI-X[%d]: %d\n",
						pci_name(dev), i,
						op->msix_entries[i].vector);
			}
		}
	} else
		pr_warn_ratelimited("%s: error enabling MSI-X for guest %u: err %d!\n",
				    pci_name(dev), pdev->xdev->otherend_id,
				    result);
	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return result > 0 ? 0 : result;
}
Example #18
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct xenvif *vif = netdev_priv(dev);
    struct xenvif_queue *queue = NULL;
    unsigned int num_queues = vif->num_queues;
    u16 index;
    struct xenvif_rx_cb *cb;

    BUG_ON(skb->dev != dev);

    /* Drop the packet if queues are not set up */
    if (num_queues < 1)
        goto drop;

    /* Obtain the queue to be used to transmit this packet */
    index = skb_get_queue_mapping(skb);
    if (index >= num_queues) {
        pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
                            index, vif->dev->name);
        index %= num_queues;
    }
    queue = &vif->queues[index];

    /* Drop the packet if queue is not ready */
    if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
        goto drop;

    if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
        struct ethhdr *eth = (struct ethhdr *)skb->data;

        if (!xenvif_mcast_match(vif, eth->h_dest))
            goto drop;
    }

    cb = XENVIF_RX_CB(skb);
    cb->expires = jiffies + vif->drain_timeout;

    xenvif_rx_queue_tail(queue, skb);
    xenvif_kick_thread(queue);

    return NETDEV_TX_OK;

drop:
    vif->dev->stats.tx_dropped++;
    dev_kfree_skb(skb);
    return NETDEV_TX_OK;
}
Example #19
static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
{
	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_pcie_data_tx_reclaim(priv);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			pr_warn_ratelimited("reclaim full Tx queue\n");
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}
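
For reference, the CIRC_SPACE() check above relies on the classic helpers
from <linux/circ_buf.h>, which assume a power-of-two ring size:

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))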
Example #20
File: dir.c Project: Lyude/linux
/*
 * Common operations required to be done after creation of a file on upper.
 * If @hardlink is false, then @inode is a pre-allocated inode, which we may
 * or may not use to instantiate the new dentry.
 */
static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
			   struct dentry *newdentry, bool hardlink)
{
	struct ovl_inode_params oip = {
		.upperdentry = newdentry,
		.newinode = inode,
	};

	ovl_dir_modified(dentry->d_parent, false);
	ovl_dentry_set_upper_alias(dentry);
	if (!hardlink) {
		/*
		 * ovl_obtain_alias() can be called after ovl_create_real()
		 * and before we get here, so we may get an inode from cache
		 * with the same real upperdentry that is not the inode we
		 * pre-allocated.  In this case we will use the cached inode
		 * to instantiate the new dentry.
		 *
		 * XXX: if we ever use ovl_obtain_alias() to decode directory
		 * file handles, need to use ovl_get_inode_locked() and
		 * d_instantiate_new() here to prevent from creating two
		 * hashed directory inode aliases.
		 */
		inode = ovl_get_inode(dentry->d_sb, &oip);
		if (WARN_ON(IS_ERR(inode)))
			return PTR_ERR(inode);
	} else {
		WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
		dput(newdentry);
		inc_nlink(inode);
	}

	d_instantiate(dentry, inode);
	if (inode != oip.newinode) {
		pr_warn_ratelimited("overlayfs: newly created inode found in cache (%pd2)\n",
				    dentry);
	}

	/* Force lookup of new upper hardlink to find its lower */
	if (hardlink)
		d_drop(dentry);

	return 0;
}
Example #21
File: irq.c Project: 01org/prd
/*
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, smp_processor_id());
	}

	local_irq_restore(flags);
}
Example #22
/**
 * _vp_wait_for_idle() - wait for voltage processor to idle
 * @voltdm:	voltage domain
 * @vp:		voltage processor instance
 *
 * In some conditions, it is important to ensure that Voltage Processor
 * is idle before performing operations on the Voltage Processor(VP).
 * This is primarily to ensure that VP state machine does not enter into
 * invalid state.
 *
 * Returns -ETIMEDOUT if a timeout occurs - this could be a critical
 * failure, as it indicates that the voltage processor might have its
 * state machine stuck without recovering (which, theoretically, should
 * never happen, of course). Returns 0 if the idle state is detected.
 *
 * Note: callers are expected to ensure requisite checks are performed
 * on the pointers passed.
 */
static inline int _vp_wait_for_idle(struct voltagedomain *voltdm,
				    struct omap_vp_instance *vp)
{
	int timeout;

	omap_test_timeout((voltdm->read(vp->vstatus) &
			   vp->common->vstatus_vpidle), VP_IDLE_TIMEOUT,
			  timeout);

	if (timeout >= VP_IDLE_TIMEOUT) {
		/* Don't spam the console, but ensure we catch attention */
		pr_warn_ratelimited("%s: vdd_%s idle timed out\n",
				    __func__, voltdm->name);
		WARN_ONCE(1, "vdd_%s idle timed out\n", voltdm->name);

		return -ETIMEDOUT;
	}

	return 0;
}
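
omap_test_timeout() is a busy-wait helper. A minimal sketch of the polling
pattern it implements, assuming the mach-omap2 definition (spin in 1 us
steps until the condition holds or the count runs out):

#define omap_test_timeout(cond, timeout, index)			\
({								\
	for (index = 0; index < timeout; index++) {		\
		if (cond)					\
			break;					\
		udelay(1);					\
	}							\
})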
Example #23
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);

    irq_enter();

    /*
     * Some hardware gives randomly wrong interrupts.  Rather
     * than crashing, do something sensible.
     */
    if (unlikely(irq >= nr_irqs)) {
        pr_warn_ratelimited("Bad IRQ%u\n", irq);
        ack_bad_irq(irq);
    } else {
        generic_handle_irq(irq);
    }

    irq_exit();
    set_irq_regs(old_regs);
}
Example #24
static int qpnpint_arbiter_op(struct irq_data *d,
			      struct q_irq_data *irq_d,
			      int (*arb_op)(struct spmi_controller *,
					    struct qpnp_irq_spec *,
					    uint32_t))

{
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct qpnp_irq_spec q_spec;
	int rc;

	if (!arb_op)
		return 0;

	if (!chip_d->cb->register_priv_data) {
		pr_warn_ratelimited("No ability to register arbiter registration data\n");
		return -ENODEV;
	}

	rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
	if (rc) {
		pr_err_ratelimited("%s: decode failed on hwirq %lu\n",
							__func__, d->hwirq);
		return rc;
	} else {
		if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
			rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
						&q_spec, &irq_d->priv_d);
			if (rc) {
				pr_err_ratelimited(
					"%s: arbiter registration failed on hwirq %lu\n",
					__func__, d->hwirq);
				return rc;
			}

		}
		arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
	}

	return 0;
}
Example #25
static void qpnpint_irq_mask(struct irq_data *d)
{
	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct q_perip_data *per_d = irq_d->per_d;
	int rc;
	uint8_t prev_int_en;

	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

	if (!chip_d->cb) {
		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
				chip_d->bus_nr, irq_d->spmi_slave,
				irq_d->spmi_offset);
		return;
	}

	spin_lock(&per_d->lock);
	prev_int_en = per_d->int_en;
	per_d->int_en &= ~irq_d->mask_shift;

	if (prev_int_en && !(per_d->int_en)) {
		/*
		 * no interrupt on this peripheral is enabled
		 * ask the arbiter to ignore this peripheral
		 */
		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
	}
	spin_unlock(&per_d->lock);

	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
					(u8 *)&irq_d->mask_shift, 1);
	if (rc) {
		pr_err_ratelimited("spmi failure on irq %d\n", d->irq);
		return;
	}

	pr_debug("done hwirq %lu irq: %d\n", d->hwirq, d->irq);
}
Example #26
unsigned int ovl_get_nlink(struct dentry *lowerdentry,
			   struct dentry *upperdentry,
			   unsigned int fallback)
{
	int nlink_diff;
	int nlink;
	char buf[13];
	int err;

	if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
		return fallback;

	err = vfs_getxattr(upperdentry, OVL_XATTR_NLINK, &buf, sizeof(buf) - 1);
	if (err < 0)
		goto fail;

	buf[err] = '\0';
	if ((buf[0] != 'L' && buf[0] != 'U') ||
	    (buf[1] != '+' && buf[1] != '-'))
		goto fail;

	err = kstrtoint(buf + 1, 10, &nlink_diff);
	if (err < 0)
		goto fail;

	nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
	nlink += nlink_diff;

	if (nlink <= 0)
		goto fail;

	return nlink;

fail:
	pr_warn_ratelimited("overlayfs: failed to get index nlink (%pd2, err=%i)\n",
			    upperdentry, err);
	return fallback;
}
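
The xattr parsed above stores nlink relative to either the lower ('L') or
upper ('U') inode as a signed decimal delta. A hypothetical encoder, only
to illustrate the value layout (not the kernel's actual helper):

static int nlink_xattr_encode(char base, int diff, char *buf, size_t len)
{
	/* e.g. base='U', diff=1 -> "U+1"; base='L', diff=-2 -> "L-2" */
	return snprintf(buf, len, "%c%+i", base, diff);
}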
Example #27
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	unsigned int irq;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    irq, smp_processor_id());
	}

	local_irq_restore(flags);
}
Example #28
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
	__be32 *p;
	__be32 status;

	status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ);
	if (unlikely(status != 0))
		return status;
	p = read_buf(xdr, 12);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	hdr->minorversion = ntohl(*p++);
	/* Check for minor version support */
	if (hdr->minorversion <= NFS4_MAX_MINOR_VERSION) {
		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 and v4.2 */
	} else {
		pr_warn_ratelimited("NFS: %s: NFSv4 server callback with "
			"illegal minor version %u!\n",
			__func__, hdr->minorversion);
		return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
	}
	hdr->nops = ntohl(*p);
	return 0;
}
Example #29
static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
					    enum kmsg_dump_reason reason,
					    u64 *id, unsigned int part,
					    const char *buf,
					    bool compressed, size_t size,
					    struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;
	struct persistent_ram_zone *prz;
	size_t hlen;

	if (type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, buf, size);
		return 0;
	} else if (type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], buf, size);
		return 0;
	} else if (type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/* Out of the various dmesg dump types, ramoops is currently designed
	 * to only store crash logs, rather than storing general kernel logs.
	 */
	if (reason != KMSG_DUMP_OOPS &&
	    reason != KMSG_DUMP_PANIC)
		return -EINVAL;

	/* Skip oopses when configured to do so. */
	if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
		return -EINVAL;

	/* Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	hlen = ramoops_write_kmsg_hdr(prz, compressed);
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}
Example #30
static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
{
	int ret;
	struct sdio_func_tuple *this, **prev;
	unsigned i, ptr = 0;

	/*
	 * Note that this works for the common CIS (function number 0) as
	 * well as a function's CIS since SDIO_CCCR_CIS and SDIO_FBR_CIS
	 * have the same offset.
	 */
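	/* Assemble the CIS pointer from three registers, LSB first. */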
	for (i = 0; i < 3; i++) {
		unsigned char x, fn;

		if (func)
			fn = func->num;
		else
			fn = 0;

		ret = mmc_io_rw_direct(card, 0, 0,
			SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
		if (ret)
			return ret;
		ptr |= x << (i * 8);
	}

	if (func)
		prev = &func->tuples;
	else
		prev = &card->tuples;

	if (*prev)
		return -EINVAL;

	do {
		unsigned char tpl_code, tpl_link;

		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
		if (ret)
			break;

		/* 0xff means we're done */
		if (tpl_code == 0xff)
			break;

		/* null entries have no link field or data */
		if (tpl_code == 0x00)
			continue;

		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
		if (ret)
			break;

		/* a size of 0xff also means we're done */
		if (tpl_link == 0xff)
			break;

		this = kmalloc(sizeof(*this) + tpl_link, GFP_KERNEL);
		if (!this)
			return -ENOMEM;

		for (i = 0; i < tpl_link; i++) {
			ret = mmc_io_rw_direct(card, 0, 0,
					       ptr + i, 0, &this->data[i]);
			if (ret)
				break;
		}
		if (ret) {
			kfree(this);
			break;
		}

		/* Try to parse the CIS tuple */
		ret = cis_tpl_parse(card, func, "CIS",
				    cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
				    tpl_code, this->data, tpl_link);
		if (ret == -EILSEQ || ret == -ENOENT) {
			/*
			 * The tuple is unknown or known but not parsed.
			 * Queue the tuple for the function driver.
			 */
			this->next = NULL;
			this->code = tpl_code;
			this->size = tpl_link;
			*prev = this;
			prev = &this->next;

			if (ret == -ENOENT) {
				/* warn about unknown tuples */
				pr_warn_ratelimited("%s: queuing unknown"
				       " CIS tuple 0x%02x (%u bytes)\n",
				       mmc_hostname(card->host),
				       tpl_code, tpl_link);
			}

			/* keep on analyzing tuples */
			ret = 0;
		} else {
			/*
			 * We don't need the tuple anymore if it was
			 * successfully parsed by the SDIO core or if it is
			 * not going to be queued for a driver.
			 */
			kfree(this);
		}

		ptr += tpl_link;
	} while (!ret);

	/*
	 * Link in all unknown tuples found in the common CIS so that
	 * drivers don't have to go digging in two places.
	 */
	if (func)
		*prev = card->tuples;

	return ret;
}