Example 1
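A qeth sysfs store callback: the deprecated portname attribute is ignored, with a one-time warning, and the write is accepted so userspace does not break.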
static ssize_t qeth_dev_portname_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

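	/* dev_warn_once() prints only the first time this call site is
	 * hit, so repeated writes do not flood the kernel log.
	 */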
	dev_warn_once(&card->gdev->dev,
		      "portname is deprecated and is ignored\n");
	return count;
}
Example 2
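The ds3232 RTC suspend handler: the alarm interrupt is armed as a wakeup source, with a one-time warning if that fails.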
static int ds3232_suspend(struct device *dev)
{
	struct ds3232 *ds3232 = dev_get_drvdata(dev);

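	/* If userspace enabled wakeup, arm the alarm IRQ as a wakeup
	 * source; failure is logged once but does not abort suspend.
	 */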
	if (device_may_wakeup(dev)) {
		if (enable_irq_wake(ds3232->irq))
			dev_warn_once(dev, "Cannot set wakeup source\n");
	}

	return 0;
}
Example 3
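The usdhi6 signal-voltage switch: a failure to apply the matching pin state is logged once and the error is propagated to the MMC core.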
static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	ret = mmc_regulator_set_vqmmc(mmc, ios);
	if (ret < 0)
		return ret;

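	/* Update the pin states to match the new signal voltage; warn
	 * once on failure and still propagate the error to the caller.
	 */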
	ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
	if (ret)
		dev_warn_once(mmc_dev(mmc),
			      "Failed to set pinstate err=%d\n", ret);
	return ret;
}
Example 4
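The rain CEC adapter's serio interrupt handler: received bytes are pushed into a ring buffer, and overflow is logged only once.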
static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data,
				    unsigned int flags)
{
	struct rain *rain = serio_get_drvdata(serio);

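	/* If the buffer is already full, warn once and drop the byte. */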
	if (rain->buf_len == DATA_SIZE) {
		dev_warn_once(rain->dev, "buffer overflow\n");
		return IRQ_HANDLED;
	}
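	/* Ring-buffer write; the 0xff wrap mask suggests DATA_SIZE is 256. */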
	spin_lock(&rain->buf_lock);
	rain->buf_len++;
	rain->buf[rain->buf_wr_idx] = data;
	rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff;
	spin_unlock(&rain->buf_lock);
	schedule_work(&rain->work);
	return IRQ_HANDLED;
}
Example 5
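The bcm2835 SPI can_dma hook: transfers below a minimum length or above the 16-bit DLEN limit are rejected for DMA, the latter with a one-time warning.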
static bool bcm2835_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *tfr)
{
	/* we start DMA efforts only on bigger transfers */
	if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
		return false;

	/* BCM2835_SPI_DLEN is a 16-bit register, so the maximum DMA
	 * transfer length is 65535 bytes.
	 * We could revisit this by using an alternative transfer
	 * method - ideally one that needs no further interaction...
	 */
	if (tfr->len > 65535) {
		dev_warn_once(&spi->dev,
			      "transfer size of %d too big for dma-transfer\n",
			      tfr->len);
		return false;
	}

	/* the transfer qualifies for DMA */
	return true;
}
Example 6
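The nfp BPF offload verifier callback: each helper call is checked against the capabilities the firmware advertises, and bpf_event_output() users get a one-time warning that offload semantics differ.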
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

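	/* Validate the helper call against the FW's advertised
	 * capabilities; unsupported helpers are rejected with -EOPNOTSUPP.
	 */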
	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	case BPF_FUNC_perf_event_output:
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* The index must be BPF_F_CURRENT_CPU so the event can be
		 * reported on whichever CPU receives the control message
		 * from the FW.
		 */
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* We don't care about any argument other than the 4th;
		 * to save space in meta, shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;

		if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* Warn user that on offload NFP may return success even if map
		 * is not going to accept the event, since the event output is
		 * fully async and device won't know the state of the map.
		 * There is also FW limitation on the event length.
		 *
		 * Lost events will not show up on the perf ring, driver
		 * won't see them at all.  Events may also get reordered.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}
Example 7
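The coresight OF parsing helper: platform data is built from the device tree, with a one-time warning when a component still uses the obsolete port bindings.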
struct coresight_platform_data *
of_get_coresight_platform_data(struct device *dev,
			       const struct device_node *node)
{
	int ret = 0;
	struct coresight_platform_data *pdata;
	struct coresight_connection *conn;
	struct device_node *ep = NULL;
	const struct device_node *parent = NULL;
	bool legacy_binding = false;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* Use device name as sysfs handle */
	pdata->name = dev_name(dev);
	pdata->cpu = of_coresight_get_cpu(node);

	/* Get the number of input and output ports for this component */
	of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);

	/* If there are no output connections, we are done */
	if (!pdata->nr_outport)
		return pdata;

	ret = of_coresight_alloc_memory(dev, pdata);
	if (ret)
		return ERR_PTR(ret);

	parent = of_coresight_get_output_ports_node(node);
	/*
	 * If the DT uses the obsolete bindings, the ports are listed
	 * directly under the device node and we need to filter out the
	 * input ports.
	 */
	if (!parent) {
		legacy_binding = true;
		parent = node;
		dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
	}

	conn = pdata->conns;

	/* Iterate through each output port to discover topology */
	while ((ep = of_graph_get_next_endpoint(parent, ep))) {
		/*
		 * The legacy binding mixes input and output ports under
		 * the same parent, so skip the input ports here; they are
		 * processed together with their connected output ports.
		 */
		if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
			continue;

		ret = of_coresight_parse_endpoint(dev, ep, conn);
		switch (ret) {
		case 1:
			conn++;		/* Fall through */
		case 0:
			break;
		default:
			return ERR_PTR(ret);
		}
	}

	return pdata;
}