Example #1
static irqreturn_t khwq_acc_int_handler(int irq, void *_instdata)
{
	struct hwqueue_instance *inst = NULL;
	struct khwq_acc_channel *acc;
	struct khwq_instance *kq = NULL;
	struct khwq_range_info *range;
	struct hwqueue_device *hdev;
	struct khwq_pdsp_info *pdsp;
	struct khwq_acc_info *info;
	struct khwq_device *kdev;

	u32 *list, *list_cpu, val, idx, notifies;
	int range_base, channel, queue = 0;
	dma_addr_t list_dma;

	range = _instdata;
	info  = &range->acc_info;
	kdev  = range->kdev;
	hdev  = to_hdev(kdev);
	pdsp  = range->acc_info.pdsp;
	acc   = range->acc;

	range_base = kdev->base_id + range->queue_base;

	if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
		/* TODO: this needs extent checks */
		for (queue = 0; queue < range->num_irqs; queue++)
			if (range->irqs[queue] == irq)
				break;
		inst	 = hwqueue_id_to_inst(hdev, range_base + queue);
		kq	 = hwqueue_inst_to_priv(inst);
		acc	+= queue;
	}

	channel = acc->channel;
	list_dma = acc->list_dma[acc->list_index];
	list_cpu = acc->list_cpu[acc->list_index];

	dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %pad\n",
		channel, acc->list_index, list_cpu, &list_dma);

	if (atomic_read(&acc->retrigger_count)) {
		atomic_dec(&acc->retrigger_count);
		__khwq_acc_notify(range, acc);

		__raw_writel(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));

		/* ack the interrupt */
		__raw_writel(ACC_CHANNEL_INT_BASE + channel,
			     pdsp->intd + ACC_INTD_OFFSET_EOI);

		return IRQ_HANDLED;
	}

	notifies = __raw_readl(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
	WARN_ON(!notifies);

	dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size, DMA_FROM_DEVICE);

	for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
	     list += ACC_LIST_ENTRY_WORDS) {

		if (ACC_LIST_ENTRY_WORDS == 1) {
			dev_dbg(kdev->dev, "acc-irq: list %d, entry @%p, "
				"%08x\n",
				acc->list_index, list, list[0]);
		} else if (ACC_LIST_ENTRY_WORDS == 2) {
			dev_dbg(kdev->dev, "acc-irq: list %d, entry @%p, "
				"%08x %08x\n",
				acc->list_index, list, list[0], list[1]);
		} else if (ACC_LIST_ENTRY_WORDS == 4) {
			dev_dbg(kdev->dev, "acc-irq: list %d, entry @%p, "
				"%08x %08x %08x %08x\n",
				acc->list_index, list,
				list[0], list[1], list[2], list[3]);
		}

		val = list[ACC_LIST_ENTRY_DESC_IDX];

		if (!val)
			break;

		if (range->flags & RANGE_MULTI_QUEUE) {
			queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
			if (queue < range_base || queue >= range_base + range->num_queues) {
				dev_err(kdev->dev, "bad queue %d, expecting %d-%d\n",
					queue, range_base, range_base + range->num_queues);
				break;
			}
			queue -= range_base;
			inst = hwqueue_id_to_inst(hdev, range_base + queue);
			kq = hwqueue_inst_to_priv(inst);
		}

		if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
			atomic_dec(&kq->desc_count);
			/* TODO: need a statistics counter for such drops */
			continue;
		}

		idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
		kq->descs[idx] = val;
		dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
			val, idx, queue + range_base);
	}
Example #2
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int 
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb, 
			unsigned int data_len,
			unsigned int pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const struct nf_loginfo *li,
			const char *prefix)
{
	unsigned char *old_tail;
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	u_int32_t tmp_uint;

	UDEBUG("entered\n");
		
	old_tail = inst->skb->tail;
	nlh = NLMSG_PUT(inst->skb, 0, 0, 
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix) {
		int slen = strlen(prefix);
		if (slen > NFULNL_PREFIXLEN)
			slen = NFULNL_PREFIXLEN;
		NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix);
	}

	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge && skb->nf_bridge->physindev) {
				tmp_uint = 
				    htonl(skb->nf_bridge->physindev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge && skb->nf_bridge->physoutdev) {
				tmp_uint = 
				    htonl(skb->nf_bridge->physoutdev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (skb->nfmark) {
		tmp_uint = htonl(skb->nfmark);
		NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
	}

	if (indev && skb->dev && skb->dev->hard_header_parse) {
		struct nfulnl_msg_packet_hw phw;

		phw.hw_addrlen = 
			skb->dev->hard_header_parse((struct sk_buff *)skb, 
						    phw.hw_addr);
		phw.hw_addrlen = htons(phw.hw_addrlen);
		NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
	}

	if (skb->tstamp.off_sec) {
		struct nfulnl_msg_packet_timestamp ts;

		ts.sec = cpu_to_be64(skb->tstamp.off_sec);
		ts.usec = cpu_to_be64(skb->tstamp.off_usec);

		NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid);
			/* need to unlock here since NFA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ) {
		tmp_uint = htonl(inst->seq++);
		NFA_PUT(inst->skb, NFULA_SEQ, sizeof(tmp_uint), &tmp_uint);
	}
	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) {
		tmp_uint = atomic_inc_return(&global_seq);
		NFA_PUT(inst->skb, NFULA_SEQ_GLOBAL, sizeof(tmp_uint), &tmp_uint);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size));
		nfa->nfa_type = NFULA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}
		
	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
	UDEBUG("nlmsg_failure\n");
nfattr_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
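Example #3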
u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
{
	return atomic_inc_return((atomic_t *)&atom->u.val);
}
Example #4
int spi_gpio_next_id(void)
{
	static atomic_t counter = ATOMIC_INIT(-1);

	return atomic_inc_return(&counter);
}
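Note on the idiom above: initialising the counter to -1 makes the first increment return 0, so IDs come out as 0, 1, 2, ... A minimal userspace C11 sketch of the same pattern (illustrative names, not the kernel atomic API):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace C11 sketch of the ID-allocation idiom above. atomic_fetch_add()
 * returns the old value, so adding 1 reproduces atomic_inc_return(). */
static atomic_int counter = -1;

static int next_id(void)
{
	return atomic_fetch_add(&counter, 1) + 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("%d\n", next_id());	/* prints 0, 1, 2 */
	return 0;
}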
Example #5
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const struct nf_loginfo *li,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
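The NLMSG_PUT and NLA_PUT helpers above hide an error goto: on failure they jump to a label (nlmsg_failure or nla_put_failure) that the calling function must provide, which is why __build_packet_message ends with those labels. The NFA_PUT macro in Example #2 works the same way, jumping to nfattr_failure. An approximate sketch of the expansion:

/* Approximate expansion of the NLA_PUT helper used above: on failure it
 * jumps to a nla_put_failure label supplied by the calling function. */
#define NLA_PUT(skb, attrtype, attrlen, data)			\
	do {							\
		if (nla_put(skb, attrtype, attrlen, data))	\
			goto nla_put_failure;			\
	} while (0)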
Example #6
static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	int rc;
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int i;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(new_policy->cpu))
			return -EINVAL;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactivex_attr_group);
		if (rc)
			return rc;

		pm_idle_old = pm_idle;
		pm_idle = cpufreq_idle;
		policy = new_policy;
		enabled = 1;
		register_early_suspend(&interactivex_power_suspend);
		pr_info("[imoseyon] interactiveX active\n");
		freq_table = cpufreq_frequency_get_table(new_policy->cpu);
		for (i = 0; (freq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
			unsigned int freq = freq_table[i].frequency;
			if (freq == CPUFREQ_ENTRY_INVALID) {
				continue;
			}
			if (freq < min_freq)	
				min_freq = freq;
			if (freq > max_freq)
				max_freq = freq;
		}
		/* Pick the midrange frequency from the table when enough
		 * frequency bins are available; otherwise fall back to the
		 * maximum frequency. */
		resum_speed = freq_table[(i-1)/2].frequency > min_freq ?
				freq_table[(i-1)/2].frequency : max_freq;
		freq_threshld = max_freq;
		break;

	case CPUFREQ_GOV_STOP:
		if (atomic_dec_return(&active_count) > 1)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactivex_attr_group);

		pm_idle = pm_idle_old;
		del_timer(&per_cpu(cpu_timer, new_policy->cpu));
		enabled = 0;
		unregister_early_suspend(&interactivex_power_suspend);
		pr_info("[imoseyon] interactiveX inactive\n");
		break;

	case CPUFREQ_GOV_LIMITS:
		if (new_policy->max < new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->max, CPUFREQ_RELATION_H);
		else if (new_policy->min > new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
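The atomic_inc_return(&active_count) > 1 gate above ensures only the first GOV_START performs the one-time setup, with the matching decrement on GOV_STOP guarding teardown (the driver compares the decremented count against 1; the sketch below uses the symmetric check against 0). A minimal C11 sketch of the idiom, with illustrative names:

#include <stdatomic.h>

static atomic_int active_count;

static void gov_start_sketch(void)
{
	/* only the caller that takes the count from 0 to 1 runs setup */
	if (atomic_fetch_add(&active_count, 1) + 1 > 1)
		return;
	/* ... one-time setup (sysfs group, idle hook) would go here ... */
}

static void gov_stop_sketch(void)
{
	/* only the caller that returns the count to 0 runs teardown */
	if (atomic_fetch_sub(&active_count, 1) - 1 > 0)
		return;
	/* ... one-time teardown would go here ... */
}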
Example #7
static void ar5523_data_rx_cb(struct urb *urb)
{
	struct ar5523_rx_data *data = urb->context;
	struct ar5523 *ar = data->ar;
	struct ar5523_rx_desc *desc;
	struct ar5523_chunk *chunk;
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_rx_status *rx_status;
	u32 rxlen;
	int usblen = urb->actual_length;
	int hdrlen, pad;

	ar5523_dbg(ar, "%s\n", __func__);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (urb->status != -ESHUTDOWN)
			ar5523_err(ar, "%s: USB err: %d\n", __func__,
				   urb->status);
		goto skip;
	}

	if (usblen < AR5523_MIN_RXBUFSZ) {
		ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
		goto skip;
	}

	chunk = (struct ar5523_chunk *) data->skb->data;

	if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
		chunk->seqnum != 0) {
		ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
			   chunk->seqnum, chunk->flags,
			   be16_to_cpu(chunk->length));
		goto skip;
	}

	/* Rx descriptor is located at the end, 32-bit aligned */
	desc = (struct ar5523_rx_desc *)
		(data->skb->data + usblen - sizeof(struct ar5523_rx_desc));

	rxlen = be32_to_cpu(desc->len);
	if (rxlen > ar->rxbufsz) {
		ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
			   be32_to_cpu(desc->len));
		goto skip;
	}

	if (!rxlen) {
		ar5523_dbg(ar, "RX: rxlen is 0\n");
		goto skip;
	}

	if (be32_to_cpu(desc->status) != 0) {
		ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
			   be32_to_cpu(desc->status), be32_to_cpu(desc->len));
		goto skip;
	}

	skb_reserve(data->skb, sizeof(*chunk));
	skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));

	hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
	if (!IS_ALIGNED(hdrlen, 4)) {
		ar5523_dbg(ar, "eek, alignment workaround activated\n");
		pad = ALIGN(hdrlen, 4) - hdrlen;
		memmove(data->skb->data + pad, data->skb->data, hdrlen);
		skb_pull(data->skb, pad);
		skb_put(data->skb, pad);
	}

	rx_status = IEEE80211_SKB_RXCB(data->skb);
	memset(rx_status, 0, sizeof(*rx_status));
	rx_status->freq = be32_to_cpu(desc->channel);
	rx_status->band = hw->conf.chandef.chan->band;
	rx_status->signal = -95 + be32_to_cpu(desc->rssi);

	ieee80211_rx_irqsafe(hw, data->skb);
	data->skb = NULL;

skip:
	if (data->skb) {
		dev_kfree_skb_irq(data->skb);
		data->skb = NULL;
	}

	ar5523_rx_data_put(ar, data);
	if (atomic_inc_return(&ar->rx_data_free_cnt) >=
	    AR5523_RX_DATA_REFILL_COUNT &&
	    test_bit(AR5523_HW_UP, &ar->flags))
		queue_work(ar->wq, &ar->rx_refill_work);
}
Example #8
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(skb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(skb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk->sk_state != TCP_TIME_WAIT) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			return -1;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
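Example #9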
/*
 * Common MC error handling code.
 */
static irqreturn_t tegra_mc_error_thread(int irq, void *data)
{
	struct mc_client *client = NULL;
	const struct mc_error *fault;
	const char *smmu_info;
	unsigned long count;
	phys_addr_t addr;
	u32 status, intr = mc_intr;
	u32 write, secure;
	u32 client_id;

	cancel_delayed_work(&unthrottle_prints_work);

	if (intr & MC_INT_ARBITRATION_EMEM) {
		arb_intr();
		if (intr == MC_INT_ARBITRATION_EMEM)
			goto out;
		intr &= ~MC_INT_ARBITRATION_EMEM;
	}

	count = atomic_inc_return(&error_count);

	fault = chip_specific.mcerr_info(intr & mc_int_mask);
	if (WARN(!fault, "Unknown error! intr sig: 0x%08x\n",
		 intr & mc_int_mask))
		goto out;

	if (fault->flags & E_NO_STATUS) {
		mcerr_pr("MC fault - no status: %s\n", fault->msg);
		goto out;
	}

	status = __mc_readl(err_channel, fault->stat_reg);
	addr = __mc_readl(err_channel, fault->addr_reg);
	secure = !!(status & MC_ERR_STATUS_SECURE);
	write = !!(status & MC_ERR_STATUS_WRITE);
	client_id = status & 0xff;
	client = &mc_clients[client_id <= mc_client_last
			     ? client_id : mc_client_last];

	/*
	 * LPAE: make sure we get the extra 2 physical address bits available
	 * and pass them down to the printing function.
	 */
	addr |= (((phys_addr_t)(status & MC_ERR_STATUS_ADR_HI)) << 12);

	if (fault->flags & E_SMMU)
		smmu_info = smmu_page_attrib[MC_ERR_SMMU_BITS(status)];
	else
		smmu_info = NULL;

	mcerr_info_update(client, intr & mc_int_mask);

	if (mcerr_throttle_enabled && count >= MAX_PRINTS) {
		schedule_delayed_work(&unthrottle_prints_work, HZ/2);
		if (count == MAX_PRINTS)
			mcerr_pr("Too many MC errors; throttling prints\n");
		goto out;
	}

	chip_specific.mcerr_print(fault, client, status, addr, secure, write,
				  smmu_info);
out:
	mc_writel(intr, MC_INT_STATUS);
	mc_readl(MC_INT_MASK);

	mc_writel(mc_int_mask, MC_INT_MASK);
	return IRQ_HANDLED;
}
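Example #10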
static inline int vref_get(struct fimc_is_video *video)
{
	return atomic_inc_return(&video->refcount) - 1;
}
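vref_get() above returns the count before the increment, so a return of 0 tells the caller it took the first reference. An equivalent C11 formulation, since fetch-then-add already returns the old value (illustrative, not the driver's API):

#include <stdatomic.h>

/* Illustrative C11 analogue of vref_get(): atomic_fetch_add() returns the
 * pre-increment count directly, so no "- 1" correction is needed. */
static inline int vref_get_sketch(atomic_int *refcount)
{
	return atomic_fetch_add(refcount, 1);
}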
Example #11
/*
 * send data through a socket
 * - must be called in process context
 * - caller holds the socket locked
 */
static int rxrpc_send_data(struct kiocb *iocb,
               struct rxrpc_sock *rx,
               struct rxrpc_call *call,
               struct msghdr *msg, size_t len)
{
    struct rxrpc_skb_priv *sp;
    unsigned char __user *from;
    struct sk_buff *skb;
    struct iovec *iov;
    struct sock *sk = &rx->sk;
    long timeo;
    bool more;
    int ret, ioc, segment, copied;

    _enter(",,,{%zu},%zu", msg->msg_iovlen, len);

    timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

    /* this should be in poll */
    clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

    if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
        return -EPIPE;

    iov = msg->msg_iov;
    ioc = msg->msg_iovlen - 1;
    from = iov->iov_base;
    segment = iov->iov_len;
    iov++;
    more = msg->msg_flags & MSG_MORE;

    skb = call->tx_pending;
    call->tx_pending = NULL;

    copied = 0;
    do {
        int copy;

        if (segment > len)
            segment = len;

        _debug("SEGMENT %d @%p", segment, from);

        if (!skb) {
            size_t size, chunk, max, space;

            _debug("alloc");

            if (CIRC_SPACE(call->acks_head, call->acks_tail,
                       call->acks_winsz) <= 0) {
                ret = -EAGAIN;
                if (msg->msg_flags & MSG_DONTWAIT)
                    goto maybe_error;
                ret = rxrpc_wait_for_tx_window(rx, call,
                                   &timeo);
                if (ret < 0)
                    goto maybe_error;
            }

            max = call->conn->trans->peer->maxdata;
            max -= call->conn->security_size;
            max &= ~(call->conn->size_align - 1UL);

            chunk = max;
            if (chunk > len && !more)
                chunk = len;

            space = chunk + call->conn->size_align;
            space &= ~(call->conn->size_align - 1UL);

            size = space + call->conn->header_size;

            _debug("SIZE: %zu/%zu/%zu", chunk, space, size);

            /* create a buffer that we can retain until it's ACK'd */
            skb = sock_alloc_send_skb(
                sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
            if (!skb)
                goto maybe_error;

            rxrpc_new_skb(skb);

            _debug("ALLOC SEND %p", skb);

            ASSERTCMP(skb->mark, ==, 0);

            _debug("HS: %u", call->conn->header_size);
            skb_reserve(skb, call->conn->header_size);
            skb->len += call->conn->header_size;

            sp = rxrpc_skb(skb);
            sp->remain = chunk;
            if (sp->remain > skb_tailroom(skb))
                sp->remain = skb_tailroom(skb);

            _net("skb: hr %d, tr %d, hl %d, rm %d",
                   skb_headroom(skb),
                   skb_tailroom(skb),
                   skb_headlen(skb),
                   sp->remain);

            skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        _debug("append");
        sp = rxrpc_skb(skb);

        /* append next segment of data to the current buffer */
        copy = skb_tailroom(skb);
        ASSERTCMP(copy, >, 0);
        if (copy > segment)
            copy = segment;
        if (copy > sp->remain)
            copy = sp->remain;

        _debug("add");
        ret = skb_add_data(skb, from, copy);
        _debug("added");
        if (ret < 0)
            goto efault;
        sp->remain -= copy;
        skb->mark += copy;
        copied += copy;

        len -= copy;
        segment -= copy;
        from += copy;
        while (segment == 0 && ioc > 0) {
            from = iov->iov_base;
            segment = iov->iov_len;
            iov++;
            ioc--;
        }
        if (len == 0) {
            segment = 0;
            ioc = 0;
        }

        /* check for the far side aborting the call or a network error
         * occurring */
        if (call->state > RXRPC_CALL_COMPLETE)
            goto call_aborted;

        /* add the packet to the send queue if it's now full */
        if (sp->remain <= 0 || (segment == 0 && !more)) {
            struct rxrpc_connection *conn = call->conn;
            size_t pad;

            /* pad out if we're using security */
            if (conn->security) {
                pad = conn->security_size + skb->mark;
                pad = conn->size_align - pad;
                pad &= conn->size_align - 1;
                _debug("pad %zu", pad);
                if (pad)
                    memset(skb_put(skb, pad), 0, pad);
            }

            sp->hdr.epoch = conn->epoch;
            sp->hdr.cid = call->cid;
            sp->hdr.callNumber = call->call_id;
            sp->hdr.seq =
                htonl(atomic_inc_return(&call->sequence));
            sp->hdr.serial =
                htonl(atomic_inc_return(&conn->serial));
            sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
            sp->hdr.userStatus = 0;
            sp->hdr.securityIndex = conn->security_ix;
            sp->hdr._rsvd = 0;
            sp->hdr.serviceId = conn->service_id;

            sp->hdr.flags = conn->out_clientflag;
            if (len == 0 && !more)
                sp->hdr.flags |= RXRPC_LAST_PACKET;
            else if (CIRC_SPACE(call->acks_head, call->acks_tail,
                        call->acks_winsz) > 1)
                sp->hdr.flags |= RXRPC_MORE_PACKETS;

            ret = rxrpc_secure_packet(
                call, skb, skb->mark,
                skb->head + sizeof(struct rxrpc_header));
            if (ret < 0)
                goto out;

            memcpy(skb->head, &sp->hdr,
                   sizeof(struct rxrpc_header));
            rxrpc_queue_packet(call, skb, segment == 0 && !more);
            skb = NULL;
        }

    } while (segment > 0);
Example #12
/*
 * Create an inode for a dynamic root directory or an autocell dynamic
 * automount dir.
 */
struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
{
	struct afs_iget_data data;
	struct afs_super_info *as;
	struct afs_vnode *vnode;
	struct inode *inode;
	static atomic_t afs_autocell_ino;

	_enter("");

	as = sb->s_fs_info;
	if (as->volume) {
		data.volume = as->volume;
		data.fid.vid = as->volume->vid;
	}
	if (root) {
		data.fid.vnode = 1;
		data.fid.unique = 1;
	} else {
		data.fid.vnode = atomic_inc_return(&afs_autocell_ino);
		data.fid.unique = 0;
	}

	inode = iget5_locked(sb, data.fid.vnode,
			     afs_iget5_pseudo_dir_test, afs_iget5_set,
			     &data);
	if (!inode) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	_debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
	       inode, inode->i_ino, data.fid.vid, data.fid.vnode,
	       data.fid.unique);

	vnode = AFS_FS_I(inode);

	/* there shouldn't be an existing inode */
	BUG_ON(!(inode->i_state & I_NEW));

	inode->i_size		= 0;
	inode->i_mode		= S_IFDIR | S_IRUGO | S_IXUGO;
	if (root) {
		inode->i_op	= &afs_dynroot_inode_operations;
		inode->i_fop	= &afs_dynroot_file_operations;
	} else {
		inode->i_op	= &afs_autocell_inode_operations;
	}
	set_nlink(inode, 2);
	inode->i_uid		= GLOBAL_ROOT_UID;
	inode->i_gid		= GLOBAL_ROOT_GID;
	inode->i_ctime.tv_sec	= get_seconds();
	inode->i_ctime.tv_nsec	= 0;
	inode->i_atime		= inode->i_mtime = inode->i_ctime;
	inode->i_blocks		= 0;
	inode_set_iversion_raw(inode, 0);
	inode->i_generation	= 0;

	set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
	if (!root) {
		set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
		inode->i_flags |= S_AUTOMOUNT;
	}

	inode->i_flags |= S_NOATIME;
	unlock_new_inode(inode);
	_leave(" = %p", inode);
	return inode;
}
Example #13
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	int ret;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	if (!pipe) {
		ret = -ENOMEM;
		pipe = alloc_pipe_info(inode);
		if (!pipe)
			goto err_nocleanup;
		inode->i_pipe = pipe;
	}
	filp->f_version = 0;

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		filp->f_op = &read_pipefifo_fops;
		pipe->r_counter++;
		if (atomic_inc_return(&pipe->readers) == 1)
			wake_up_partner(inode);

		if (!atomic_read(&pipe->writers)) {
			if (filp->f_flags & O_NONBLOCK) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				wait_for_partner(inode, &pipe->w_counter);
				if (signal_pending(current))
					goto err_rd;
			}
		}
		break;
	
	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
			goto err;

		filp->f_op = &write_pipefifo_fops;
		pipe->w_counter++;
		if (atomic_inc_return(&pipe->writers) == 1)
			wake_up_partner(inode);

		if (!atomic_read(&pipe->readers)) {
			wait_for_partner(inode, &pipe->r_counter);
			if (signal_pending(current))
				goto err_wr;
		}
		break;
	
	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		filp->f_op = &rdwr_pipefifo_fops;

		atomic_inc(&pipe->readers);
		atomic_inc(&pipe->writers);
		pipe->r_counter++;
		pipe->w_counter++;
		if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
			wake_up_partner(inode);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	mutex_unlock(&inode->i_mutex);
	return 0;

err_rd:
	if (atomic_dec_and_test(&pipe->readers))
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (atomic_dec_and_test(&pipe->writers))
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
		free_pipe_info(inode);

err_nocleanup:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
Example #14
/**
 * i40iw_process_aeq - handle aeq events
 * @iwdev: iwarp device
 */
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct i40iw_aeqe_info aeinfo;
	struct i40iw_aeqe_info *info = &aeinfo;
	int ret;
	struct i40iw_qp *iwqp = NULL;
	struct i40iw_sc_cq *cq = NULL;
	struct i40iw_cq *iwcq = NULL;
	struct i40iw_sc_qp *qp = NULL;
	struct i40iw_qp_host_ctx_info *ctx_info = NULL;
	unsigned long flags;

	u32 aeqcnt = 0;

	if (!sc_aeq->size)
		return;

	do {
		memset(info, 0, sizeof(*info));
		ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		i40iw_debug(dev, I40IW_DEBUG_AEQ,
			    "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
			    __func__, info->ae_id, info->qp, info->qp_cq_id);
		if (info->qp) {
			spin_lock_irqsave(&iwdev->qptable_lock, flags);
			iwqp = iwdev->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
				i40iw_debug(dev, I40IW_DEBUG_AEQ,
					    "%s qp_id %d is already freed\n",
					    __func__, info->qp_cq_id);
				continue;
			}
			i40iw_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
			ctx_info->err_rq_idx_valid = true;
		} else {
			if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
		case I40IW_AE_LLP_FIN_RECEIVED:
			if (qp->term_flags)
				continue;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
				    (iwqp->ibqp_state == IB_QPS_RTS)) {
					i40iw_next_iw_state(iwqp,
							    I40IW_QP_STATE_CLOSING, 0, 0, 0);
					i40iw_cm_disconn(iwqp);
				}
				iwqp->cm_id->add_ref(iwqp->cm_id);
				i40iw_schedule_cm_timer(iwqp->cm_node,
							(struct i40iw_puda_buf *)iwqp,
							I40IW_TIMER_TYPE_CLOSE, 1, 0);
			}
			break;
		case I40IW_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				i40iw_terminate_done(qp, 0);
			else
				i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_BAD_CLOSE:
			/* fall through */
		case I40IW_AE_RESET_SENT:
			i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				continue;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;
		case I40IW_AE_LLP_TERMINATE_RECEIVED:
			i40iw_terminate_received(qp, info);
			break;
		case I40IW_AE_CQ_OPERATION_ERROR:
			i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
				     info->ae_id);
			cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
			iwcq = (struct i40iw_cq *)cq->back_cq;

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
			}
			break;
		case I40IW_AE_LLP_DOUBT_REACHABILITY:
			break;
		case I40IW_AE_PRIV_OPERATION_DENIED:
		case I40IW_AE_STAG_ZERO_INVALID:
		case I40IW_AE_IB_RREQ_AND_Q1_FULL:
		case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		case I40IW_AE_DDP_UBE_INVALID_MO:
		case I40IW_AE_DDP_UBE_INVALID_QN:
		case I40IW_AE_DDP_NO_L_BIT:
		case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case I40IW_AE_INVALID_ARP_ENTRY:
		case I40IW_AE_INVALID_TCP_OPTION_RCVD:
		case I40IW_AE_STALE_ARP_ENTRY:
		case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		case I40IW_AE_LLP_SYN_RECEIVED:
		case I40IW_AE_LLP_TOO_MANY_RETRIES:
		case I40IW_AE_LCE_QP_CATASTROPHIC:
		case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
			ctx_info->err_rq_idx_valid = false;
			/* fall through */
		default:
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		if (info->qp)
			i40iw_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}
Example #15
static int __devinit cx18_probe(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	int retval = 0;
	int i;
	u32 devtype;
	struct cx18 *cx;

	/* FIXME - module parameter arrays constrain max instances */
	i = atomic_inc_return(&cx18_instance) - 1;
	if (i >= CX18_MAX_CARDS) {
		printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
		       "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
		return -ENOMEM;
	}

	cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
	if (cx == NULL) {
		printk(KERN_ERR "cx18: cannot manage card %d, out of memory\n",
		       i);
		return -ENOMEM;
	}
	cx->pci_dev = pci_dev;
	cx->instance = i;

	retval = v4l2_device_register(&pci_dev->dev, &cx->v4l2_dev);
	if (retval) {
		printk(KERN_ERR "cx18: v4l2_device_register of card %d failed"
		       "\n", cx->instance);
		kfree(cx);
		return retval;
	}
	snprintf(cx->v4l2_dev.name, sizeof(cx->v4l2_dev.name), "cx18-%d",
		 cx->instance);
	CX18_INFO("Initializing card %d\n", cx->instance);

	cx18_process_options(cx);
	if (cx->options.cardtype == -1) {
		retval = -ENODEV;
		goto err;
	}

	retval = cx18_init_struct1(cx);
	if (retval)
		goto err;

	CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);

	/* PCI Device Setup */
	retval = cx18_setup_pci(cx, pci_dev, pci_id);
	if (retval != 0)
		goto free_workqueues;

	/* map io memory */
	CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
		   cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
	cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
				       CX18_MEM_SIZE);
	if (!cx->enc_mem) {
		CX18_ERR("ioremap failed. Can't get a window into CX23418 "
			 "memory and register space\n");
		CX18_ERR("Each capture card with a CX23418 needs 64 MB of "
			 "vmalloc address space for the window\n");
		CX18_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
		CX18_ERR("Use the vmalloc= kernel command line option to set "
			 "VmallocTotal to a larger value\n");
		retval = -ENOMEM;
		goto free_mem;
	}
	cx->reg_mem = cx->enc_mem + CX18_REG_OFFSET;
	devtype = cx18_read_reg(cx, 0xC72028);
	switch (devtype & 0xff000000) {
	case 0xff000000:
		CX18_INFO("cx23418 revision %08x (A)\n", devtype);
		break;
	case 0x01000000:
		CX18_INFO("cx23418 revision %08x (B)\n", devtype);
		break;
	default:
		CX18_INFO("cx23418 revision %08x (Unknown)\n", devtype);
		break;
	}

	cx18_init_power(cx, 1);
	cx18_init_memory(cx);

	cx->scb = (struct cx18_scb __iomem *)(cx->enc_mem + SCB_OFFSET);
	cx18_init_scb(cx);

	cx18_gpio_init(cx);

	/* Initialize integrated A/V decoder early to set PLLs, just in case */
	retval = cx18_av_probe(cx);
	if (retval) {
		CX18_ERR("Could not register A/V decoder subdevice\n");
		goto free_map;
	}

	/* Initialize GPIO Reset Controller to do chip resets during i2c init */
	if (cx->card->hw_all & CX18_HW_GPIO_RESET_CTRL) {
		if (cx18_gpio_register(cx, CX18_HW_GPIO_RESET_CTRL) != 0)
			CX18_WARN("Could not register GPIO reset controller"
				  "subdevice; proceeding anyway.\n");
		else
			cx->hw_flags |= CX18_HW_GPIO_RESET_CTRL;
	}

	/* active i2c  */
	CX18_DEBUG_INFO("activating i2c...\n");
	retval = init_cx18_i2c(cx);
	if (retval) {
		CX18_ERR("Could not initialize i2c\n");
		goto free_map;
	}

	if (cx->card->hw_all & CX18_HW_TVEEPROM) {
		/* Based on the model number the cardtype may be changed.
		   The PCI IDs are not always reliable. */
		const struct cx18_card *orig_card = cx->card;
		cx18_process_eeprom(cx);

		if (cx->card != orig_card) {
			/* Changed the cardtype; re-reset the I2C chips */
			cx18_gpio_init(cx);
			cx18_call_hw(cx, CX18_HW_GPIO_RESET_CTRL,
					core, reset, (u32) CX18_GPIO_RESET_I2C);
		}
	}
	if (cx->card->comment)
		CX18_INFO("%s", cx->card->comment);
	if (cx->card->v4l2_capabilities == 0) {
		retval = -ENODEV;
		goto free_i2c;
	}
	cx18_init_memory(cx);
	cx18_init_scb(cx);

	/* Register IRQ */
	retval = request_irq(cx->pci_dev->irq, cx18_irq_handler,
			     IRQF_SHARED | IRQF_DISABLED,
			     cx->v4l2_dev.name, (void *)cx);
	if (retval) {
		CX18_ERR("Failed to register irq %d\n", retval);
		goto free_i2c;
	}

	if (cx->std == 0)
		cx->std = V4L2_STD_NTSC_M;

	if (cx->options.tuner == -1) {
		for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
			if ((cx->std & cx->card->tuners[i].std) == 0)
				continue;
			cx->options.tuner = cx->card->tuners[i].tuner;
			break;
		}
	}
	/* if no tuner was found, then pick the first tuner in the card list */
	if (cx->options.tuner == -1 && cx->card->tuners[0].std) {
		cx->std = cx->card->tuners[0].std;
		if (cx->std & V4L2_STD_PAL)
			cx->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
		else if (cx->std & V4L2_STD_NTSC)
			cx->std = V4L2_STD_NTSC_M;
		else if (cx->std & V4L2_STD_SECAM)
			cx->std = V4L2_STD_SECAM_L;
		cx->options.tuner = cx->card->tuners[0].tuner;
	}
	if (cx->options.radio == -1)
		cx->options.radio = (cx->card->radio_input.audio_type != 0);

	/* The card is now fully identified, continue with card-specific
	   initialization. */
	cx18_init_struct2(cx);

	cx18_init_subdevs(cx);

	if (cx->std & V4L2_STD_525_60)
		cx->is_60hz = 1;
	else
		cx->is_50hz = 1;

	cx2341x_handler_set_50hz(&cx->cxhdl, !cx->is_60hz);

	if (cx->options.radio > 0)
		cx->v4l2_cap |= V4L2_CAP_RADIO;

	if (cx->options.tuner > -1) {
		struct tuner_setup setup;

		setup.addr = ADDR_UNSET;
		setup.type = cx->options.tuner;
		setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
		setup.tuner_callback = (setup.type == TUNER_XC2028) ?
			cx18_reset_tuner_gpio : NULL;
		cx18_call_all(cx, tuner, s_type_addr, &setup);
		if (setup.type == TUNER_XC2028) {
			static struct xc2028_ctrl ctrl = {
				.fname = XC2028_DEFAULT_FIRMWARE,
				.max_len = 64,
			};
			struct v4l2_priv_tun_config cfg = {
				.tuner = cx->options.tuner,
				.priv = &ctrl,
			};
			cx18_call_all(cx, tuner, s_config, &cfg);
		}
	}

	/* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
	   are not. */
	cx->tuner_std = cx->std;
	if (cx->std == V4L2_STD_ALL)
		cx->std = V4L2_STD_NTSC_M;

	retval = cx18_streams_setup(cx);
	if (retval) {
		CX18_ERR("Error %d setting up streams\n", retval);
		goto free_irq;
	}
	retval = cx18_streams_register(cx);
	if (retval) {
		CX18_ERR("Error %d registering devices\n", retval);
		goto free_streams;
	}

	CX18_INFO("Initialized card: %s\n", cx->card_name);

	/* Load cx18 submodules (cx18-alsa) */
	request_modules(cx);
	return 0;

free_streams:
	cx18_streams_cleanup(cx, 1);
free_irq:
	free_irq(cx->pci_dev->irq, (void *)cx);
free_i2c:
	exit_cx18_i2c(cx);
free_map:
	cx18_iounmap(cx);
free_mem:
	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
free_workqueues:
	destroy_workqueue(cx->in_work_queue);
err:
	if (retval == 0)
		retval = -ENODEV;
	CX18_ERR("Error %d on initialization\n", retval);

	v4l2_device_unregister(&cx->v4l2_dev);
	kfree(cx);
	return retval;
}

int cx18_init_on_first_open(struct cx18 *cx)
{
	int video_input;
	int fw_retry_count = 3;
	struct v4l2_frequency vf;
	struct cx18_open_id fh;
	v4l2_std_id std;

	fh.cx = cx;

	if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
		return -ENXIO;

	if (test_and_set_bit(CX18_F_I_INITED, &cx->i_flags))
		return 0;

	while (--fw_retry_count > 0) {
		/* load firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}
	set_bit(CX18_F_I_LOADED_FW, &cx->i_flags);

	/*
	 * Init the firmware twice to work around a silicon bug
	 * with the digital TS.
	 *
	 * The second firmware load requires us to normalize the APU state,
	 * or the audio for the first analog capture will be badly incorrect.
	 *
	 * I can't seem to call APU_RESETAI and have it succeed without the
	 * APU capturing audio, so we start and stop it here to do the reset
	 */

	/* MPEG Encoding, 224 kbps, MPEG Layer II, 48 ksps */
	cx18_vapi(cx, CX18_APU_START, 2, CX18_APU_ENCODING_METHOD_MPEG|0xb9, 0);
	cx18_vapi(cx, CX18_APU_RESETAI, 0);
	cx18_vapi(cx, CX18_APU_STOP, 1, CX18_APU_ENCODING_METHOD_MPEG);

	fw_retry_count = 3;
	while (--fw_retry_count > 0) {
		/* load firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}

	/*
	 * The second firmware load requires us to normalize the APU state,
	 * or the audio for the first analog capture will be badly incorrect.
	 *
	 * I can't seem to call APU_RESETAI and have it succeed without the
	 * APU capturing audio, so we start and stop it here to do the reset
	 */

	/* MPEG Encoding, 224 kbps, MPEG Layer II, 48 ksps */
	cx18_vapi(cx, CX18_APU_START, 2, CX18_APU_ENCODING_METHOD_MPEG|0xb9, 0);
	cx18_vapi(cx, CX18_APU_RESETAI, 0);
	cx18_vapi(cx, CX18_APU_STOP, 1, CX18_APU_ENCODING_METHOD_MPEG);

	/* Init the A/V decoder, if it hasn't been already */
	v4l2_subdev_call(cx->sd_av, core, load_fw);

	vf.tuner = 0;
	vf.type = V4L2_TUNER_ANALOG_TV;
	vf.frequency = 6400; /* the tuner 'baseline' frequency */

	/* Set initial frequency. For PAL/SECAM broadcasts no
	   'default' channel exists AFAIK. */
	if (cx->std == V4L2_STD_NTSC_M_JP)
		vf.frequency = 1460;	/* ch. 1 91250*16/1000 */
	else if (cx->std & V4L2_STD_NTSC_M)
		vf.frequency = 1076;	/* ch. 4 67250*16/1000 */

	video_input = cx->active_input;
	cx->active_input++;	/* Force update of input */
	cx18_s_input(NULL, &fh, video_input);

	/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
	   in one place. */
	cx->std++;		/* Force full standard initialization */
	std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std;
	cx18_s_std(NULL, &fh, &std);
	cx18_s_frequency(NULL, &fh, &vf);
	return 0;
}

static void cx18_cancel_in_work_orders(struct cx18 *cx)
{
	int i;
	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++)
		cancel_work_sync(&cx->in_work_order[i].work);
}
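Example #16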
static int appledisplay_probe(struct usb_interface *iface,
	const struct usb_device_id *id)
{
	struct backlight_properties props;
	struct appledisplay *pdata;
	struct usb_device *udev = interface_to_usbdev(iface);
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int int_in_endpointAddr = 0;
	int i, retval = -ENOMEM, brightness;
	char bl_name[20];

	
	
	iface_desc = iface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (!int_in_endpointAddr && usb_endpoint_is_int_in(endpoint)) {
			
			int_in_endpointAddr = endpoint->bEndpointAddress;
			break;
		}
	}
	if (!int_in_endpointAddr) {
		dev_err(&iface->dev, "Could not find int-in endpoint\n");
		return -EIO;
	}

	
	pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
	if (!pdata) {
		retval = -ENOMEM;
		dev_err(&iface->dev, "Out of memory\n");
		goto error;
	}

	pdata->udev = udev;

	spin_lock_init(&pdata->lock);
	INIT_DELAYED_WORK(&pdata->work, appledisplay_work);

	
	pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
	if (!pdata->msgdata) {
		retval = -ENOMEM;
		dev_err(&iface->dev,
			"Allocating buffer for control messages failed\n");
		goto error;
	}

	
	pdata->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!pdata->urb) {
		retval = -ENOMEM;
		dev_err(&iface->dev, "Allocating URB failed\n");
		goto error;
	}

	
	pdata->urbdata = usb_alloc_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
		GFP_KERNEL, &pdata->urb->transfer_dma);
	if (!pdata->urbdata) {
		retval = -ENOMEM;
		dev_err(&iface->dev, "Allocating URB buffer failed\n");
		goto error;
	}

	
	usb_fill_int_urb(pdata->urb, udev,
		usb_rcvintpipe(udev, int_in_endpointAddr),
		pdata->urbdata, ACD_URB_BUFFER_LEN, appledisplay_complete,
		pdata, 1);
	if (usb_submit_urb(pdata->urb, GFP_KERNEL)) {
		retval = -EIO;
		dev_err(&iface->dev, "Submitting URB failed\n");
		goto error;
	}

	
	snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
		atomic_inc_return(&count_displays) - 1);
	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 0xff;
	pdata->bd = backlight_device_register(bl_name, NULL, pdata,
					      &appledisplay_bl_data, &props);
	if (IS_ERR(pdata->bd)) {
		dev_err(&iface->dev, "Backlight registration failed\n");
		retval = PTR_ERR(pdata->bd);
		goto error;
	}

	
	brightness = appledisplay_bl_get_brightness(pdata->bd);

	if (brightness < 0) {
		retval = brightness;
		dev_err(&iface->dev,
			"Error while getting initial brightness: %d\n", retval);
		goto error;
	}

	
	pdata->bd->props.brightness = brightness;

	
	usb_set_intfdata(iface, pdata);

	printk(KERN_INFO "appledisplay: Apple Cinema Display connected\n");

	return 0;

error:
	if (pdata) {
		if (pdata->urb) {
			usb_kill_urb(pdata->urb);
			if (pdata->urbdata)
				usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
					pdata->urbdata, pdata->urb->transfer_dma);
			usb_free_urb(pdata->urb);
		}
		if (pdata->bd && !IS_ERR(pdata->bd))
			backlight_device_unregister(pdata->bd);
		kfree(pdata->msgdata);
	}
	usb_set_intfdata(iface, NULL);
	kfree(pdata);
	return retval;
}
Example #17
/*
 * omap4iss_video_buffer_next - Complete the current buffer and return the next
 * @video: ISS video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * For capture video nodes, the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
{
	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
	enum iss_pipeline_state state;
	struct iss_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&video->qlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->qlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
			       list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&video->qlock, flags);

	ktime_get_ts(&ts);
	buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
	buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.v4l2_buf.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);

	vb2_buffer_done(&buf->vb, pipe->error ?
			VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	pipe->error = false;

	spin_lock_irqsave(&video->qlock, flags);
	if (list_empty(&video->dmaqueue)) {
		spin_unlock_irqrestore(&video->qlock, flags);
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISS_PIPELINE_QUEUE_OUTPUT
			      | ISS_PIPELINE_STREAM;
		else
			state = ISS_PIPELINE_QUEUE_INPUT
			      | ISS_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISS_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISS_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
			       list);
	spin_unlock_irqrestore(&video->qlock, flags);
	buf->vb.state = VB2_BUF_STATE_ACTIVE;
	return buf;
}
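A typical consumer of the return value, as a hedged sketch rather than code from the driver: a DMA-complete interrupt handler gives the finished buffer back and, if the queue is not empty, re-arms the engine with the next one. iss_write_dma_addr() is hypothetical, and buf->iss_addr is assumed to be the buffer's DMA address field.

static void my_dma_complete_isr(struct iss_video *video)
{
	struct iss_buffer *buf;

	buf = omap4iss_video_buffer_next(video);
	if (!buf)
		return;	/* underrun: the pipeline state was already updated */

	/* hypothetical helper: program the next buffer's DMA address */
	iss_write_dma_addr(video, buf->iss_addr);
}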
Exemplo n.º 18
0
/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}
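The block comment inside the loop says the new kthread must undo this bookkeeping before it exits. A hedged sketch of that counterpart, mirroring the error path of the function itself (the real exit path lives in xpc_kthread_start()):

static void my_kthread_exit_counterpart(struct xpc_channel *ch,
					struct xpc_partition *part)
{
	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0)
		xpc_indicate_partition_disengaged(part);

	xpc_msgqueue_deref(ch);
	xpc_part_deref(part);
}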
/**
 * radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
 *
 * @rdev: radeon device pointer
 * @ring: ring whose interrupt you want to enable
 *
 * Enables the software interrupt for a specific ring (all asics).
 * The software interrupt is generally used to signal a fence on
 * a particular ring.
 */
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
{
	return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
}
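The "== 1" test makes the helper report whether the caller is the first user, i.e. whether the interrupt still needs to be programmed. A hedged sketch of how such a get pairs with its put; locking is omitted and radeon_irq_set() stands in for the hardware update:

void my_sw_irq_get(struct radeon_device *rdev, int ring)
{
	if (radeon_irq_kms_sw_irq_get_delayed(rdev, ring))
		radeon_irq_set(rdev);	/* first user: enable in hardware */
}

void my_sw_irq_put(struct radeon_device *rdev, int ring)
{
	if (atomic_dec_and_test(&rdev->irq.ring_int[ring]))
		radeon_irq_set(rdev);	/* last user: disable in hardware */
}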
Exemplo n.º 20
0
static int batadv_interface_tx(struct sk_buff *skb,
			       struct net_device *soft_iface)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_bcast_packet *bcast_packet;
	struct vlan_ethhdr *vhdr;
	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
						   0x00, 0x00};
	static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
						    0x00, 0x00};
	unsigned int header_len = 0;
	int data_len = skb->len, ret;
	short vid __maybe_unused = -1;
	bool do_bcast = false;
	uint32_t seqno;
	unsigned long brd_delay = 1;

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto dropped;

	soft_iface->trans_start = jiffies;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		vhdr = (struct vlan_ethhdr *)skb->data;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;

		if (vhdr->h_vlan_encapsulated_proto != ethertype)
			break;

		/* fall through */
	case ETH_P_BATMAN:
		goto dropped;
	}

	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;

	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source))
		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);

	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
	 *
	 * The same goes for ECTP sent at least by some Cisco Switches,
	 * it might confuse the mesh when used with bridge loop avoidance.
	 */
	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
		goto dropped;

	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
		goto dropped;

	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		do_bcast = true;

		switch (atomic_read(&bat_priv->gw_mode)) {
		case BATADV_GW_MODE_SERVER:
			/* gateway servers should not send dhcp
			 * requests into the mesh
			 */
			ret = batadv_gw_is_dhcp_target(skb, &header_len);
			if (ret)
				goto dropped;
			break;
		case BATADV_GW_MODE_CLIENT:
			/* gateway clients should send dhcp requests
			 * via unicast to their gateway
			 */
			ret = batadv_gw_is_dhcp_target(skb, &header_len);
			if (ret)
				do_bcast = false;
			break;
		case BATADV_GW_MODE_OFF:
		default:
			break;
		}
	}

	/* ethernet packet should be broadcasted */
	if (do_bcast) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto dropped;

		/* in case of ARP request, we do not immediately broadcast the
		 * packet, instead we first wait for DAT to try to retrieve the
		 * correct ARP entry
		 */
		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);

		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
			goto dropped;

		bcast_packet = (struct batadv_bcast_packet *)skb->data;
		bcast_packet->header.version = BATADV_COMPAT_VERSION;
		bcast_packet->header.ttl = BATADV_TTL;

		/* batman packet type: broadcast */
		bcast_packet->header.packet_type = BATADV_BCAST;
		bcast_packet->reserved = 0;

		/* hw address of first interface is the orig mac because only
		 * this mac is known throughout the mesh
		 */
		memcpy(bcast_packet->orig,
		       primary_if->net_dev->dev_addr, ETH_ALEN);

		/* set broadcast sequence number */
		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
		bcast_packet->seqno = htonl(seqno);

		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);

		/* a copy is stored in the bcast list, therefore removing
		 * the original skb.
		 */
		kfree_skb(skb);

	/* unicast packet */
	} else {
		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
			if (ret)
				goto dropped;
		}

		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			goto dropped;

		batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);

		ret = batadv_unicast_send_skb(bat_priv, skb);
		if (ret != 0)
			goto dropped_freed;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
	goto end;

dropped:
	kfree_skb(skb);
dropped_freed:
	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_OK;
}
Exemplo n.º 21
0
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count __initdata = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph;
	int i, ret;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
					    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on registers writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-mem");
	sc->toggle_mem = !retain_mem;
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-periph");
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						"qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	}

	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}
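Worth noting: gdsc_count starts at ATOMIC_INIT(-1), so the first atomic_inc_return() already yields a zero-based id with no explicit "- 1". Exemplos n.º 22 and 26 below reach the same result from ATOMIC_INIT(0) plus an explicit "- 1". The idiom in isolation, as a minimal sketch:

static atomic_t next_id = ATOMIC_INIT(-1);

static int alloc_id(void)
{
	return atomic_inc_return(&next_id);	/* 0, 1, 2, ... */
}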
Exemplo n.º 22
0
static int __devinit msm_serial_hsl_probe_cir(struct platform_device *pdev)
{
	struct msm_hsl_port *msm_hsl_port;
	struct resource *uart_resource;
	struct resource *gsbi_resource;
	struct uart_port *port;
	const struct of_device_id *match;
	struct cir_platform_data *pdata;
	int ret;

	if (pdev->id == -1)
		pdev->id = atomic_inc_return(&msm_serial_hsl_next_id) - 1;

	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
		return -ENXIO;

	printk(KERN_INFO "msm_serial_cir: detected port #%d\n", pdev->id);

	port = get_port_from_line(pdev->id);
	port->dev = &pdev->dev;
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		E("%s: no platform_data assigned\n", __func__);
		return -ENXIO;
	}
	msm_hsl_port = UART_TO_MSM(port);
	msm_hsl_port->cir_set_path = pdata->cir_set_path;
	msm_hsl_port->cir_reset = pdata->cir_reset;
	msm_hsl_port->cir_power = pdata->cir_power;
	if (msm_hsl_port->cir_set_path)
		msm_hsl_port->cir_set_path(PATH_CIR);
	if (msm_hsl_port->cir_power)
		msm_hsl_port->cir_power(1);
	htc_cir_port = msm_hsl_port;
	cir_enable_flg = PATH_CIR;

	match = of_match_device(msm_hsl_match_table, &pdev->dev);
	if (!match)
		msm_hsl_port->ver_id = UARTDM_VERSION_11_13;
	else {
		D("%s () match:port->line %d, ir\n", __func__, port->line);
		msm_hsl_port->ver_id = (unsigned int)match->data;
	}

	gsbi_resource =	platform_get_resource_byname(pdev,
						     IORESOURCE_MEM,
						     "gsbi_resource");
	if (!gsbi_resource) {
		gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		D("%s () gsbi_resource:port->line %d, ir\n", __func__, port->line);
	}
	msm_hsl_port->clk = clk_get(&pdev->dev, "core_clk");
	if (gsbi_resource) {
		printk(KERN_INFO "msm_serial_cir: get gsbi_uart_clk and gsbi_pclk\n");
		msm_hsl_port->is_uartdm = 1;
		msm_hsl_port->pclk = clk_get(&pdev->dev, "iface_clk");
	} else {
		printk(KERN_INFO "msm_serial_cir: get uartdm_clk\n");
		msm_hsl_port->is_uartdm = 0;
		msm_hsl_port->pclk = NULL;
	}

	if (unlikely(IS_ERR(msm_hsl_port->clk))) {
		printk(KERN_ERR "%s: Error getting clk\n", __func__);
		return PTR_ERR(msm_hsl_port->clk);
	}
	if (unlikely(IS_ERR(msm_hsl_port->pclk))) {
		printk(KERN_ERR "%s: Error getting pclk\n", __func__);
		return PTR_ERR(msm_hsl_port->pclk);
	}

	uart_resource = platform_get_resource_byname(pdev,
						     IORESOURCE_MEM,
						     "uartdm_resource");
	if (!uart_resource)
		uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!uart_resource)) {
		printk(KERN_ERR "getting uartdm_resource failed\n");
		return -ENXIO;
	}
	port->mapbase = uart_resource->start;
	printk(KERN_INFO "msm_serial_hsl: port[%d] mapbase:%x\n", port->line, port->mapbase);

	port->irq = platform_get_irq(pdev, 0);
	if (unlikely((int)port->irq < 0)) {
		printk(KERN_ERR "%s: getting irq failed\n", __func__);
		return -ENXIO;
	}

	device_set_wakeup_capable(&pdev->dev, 1);
	platform_set_drvdata(pdev, port);
	pm_runtime_enable(port->dev);
#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
	ret = device_create_file(&pdev->dev, &dev_attr_console);
	D("%s () device_create_file, port->line %d, ir\n", __func__, port->line);
	if (unlikely(ret))
		E("%s():Can't create console attribute\n", __func__);
#endif
	msm_hsl_debugfs_init(msm_hsl_port, pdev->id);

	if (msm_hsl_port->pclk) {
		clk_prepare_enable(msm_hsl_port->pclk);
		D("%s () clk_enable, port->line %d, ir\n", __func__, port->line);
	}
	ret = uart_add_one_port(&msm_hsl_uart_driver, port);
	if (msm_hsl_port->pclk) {
		D("%s () clk_disable, port->line %d, ir\n", __func__, port->line);
		clk_disable_unprepare(msm_hsl_port->pclk);
	}

	D("%s ():port->line %d, ir\n", __func__, port->line);
	msm_hsl_port->irda_class = class_create(THIS_MODULE, "htc_irda");
	if (IS_ERR(msm_hsl_port->irda_class)) {
		ret = PTR_ERR(msm_hsl_port->irda_class);
		msm_hsl_port->irda_class = NULL;
		return -ENXIO;
	}
	msm_hsl_port->irda_dev = device_create(msm_hsl_port->irda_class,
				NULL, 0, "%s", "irda");
	if (unlikely(IS_ERR(msm_hsl_port->irda_dev))) {
		ret = PTR_ERR(msm_hsl_port->irda_dev);
		msm_hsl_port->irda_dev = NULL;
		goto err_create_ls_device;
	}
		
	ret = device_create_file(msm_hsl_port->irda_dev, &dev_attr_enable_irda);
	if (ret)
		goto err_create_ls_device_file;

	msm_hsl_port->cir_class = class_create(THIS_MODULE, "htc_cir");
	if (IS_ERR(msm_hsl_port->cir_class)) {
		ret = PTR_ERR(msm_hsl_port->cir_class);
		msm_hsl_port->cir_class = NULL;
		return -ENXIO;
	}
	msm_hsl_port->cir_dev = device_create(msm_hsl_port->cir_class,
				NULL, 0, "%s", "cir");
	if (unlikely(IS_ERR(msm_hsl_port->cir_dev))) {
		ret = PTR_ERR(msm_hsl_port->cir_dev);
		msm_hsl_port->cir_dev = NULL;
		goto err_create_ls_device;
	}
		
	ret = device_create_file(msm_hsl_port->cir_dev, &dev_attr_enable_cir);
	if (ret)
		goto err_create_ls_device_file;

	ret = device_create_file(msm_hsl_port->cir_dev, &dev_attr_reset_cir);
	if (ret)
		goto err_create_ls_device_file;

	return ret;

err_create_ls_device_file:
	device_unregister(msm_hsl_port->cir_dev);
err_create_ls_device:
	class_destroy(msm_hsl_port->cir_class);
	return ret;
}
Exemplo n.º 23
0
int __cfs_fail_check_set(__u32 id, __u32 value, int set)
{
	static atomic_t cfs_fail_count = ATOMIC_INIT(0);

	LASSERT(!(id & CFS_FAIL_ONCE));

	if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
	    (CFS_FAILED | CFS_FAIL_ONCE)) {
		atomic_set(&cfs_fail_count, 0); /* paranoia */
		return 0;
	}

	/* Fail 1/cfs_fail_val times */
	if (cfs_fail_loc & CFS_FAIL_RAND) {
		if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
			return 0;
	}

	/* Skip the first cfs_fail_val, then fail */
	if (cfs_fail_loc & CFS_FAIL_SKIP) {
		if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
			return 0;
	}

	/* check cfs_fail_val... */
	if (set == CFS_FAIL_LOC_VALUE) {
		if (cfs_fail_val != -1 && cfs_fail_val != value)
			return 0;
	}

	/* Fail cfs_fail_val times, overridden by FAIL_ONCE */
	if (cfs_fail_loc & CFS_FAIL_SOME &&
	    (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
		int count = atomic_inc_return(&cfs_fail_count);

		if (count >= cfs_fail_val) {
			set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
			atomic_set(&cfs_fail_count, 0);
			/* we lost the race to increment */
			if (count > cfs_fail_val)
				return 0;
		}
	}

	if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
	    (value & CFS_FAIL_ONCE))
		set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
	/* Lost race to set CFS_FAILED_BIT. */
	if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
		/* If CFS_FAIL_ONCE is valid, only one process can fail,
		 * otherwise multi-process can fail at the same time.
		 */
		if (cfs_fail_loc & CFS_FAIL_ONCE)
			return 0;
	}

	switch (set) {
	case CFS_FAIL_LOC_NOSET:
	case CFS_FAIL_LOC_VALUE:
		break;
	case CFS_FAIL_LOC_ORSET:
		cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
		break;
	case CFS_FAIL_LOC_RESET:
		cfs_fail_loc = value;
		break;
	default:
		LASSERTF(0, "called with bad set %u\n", set);
		break;
	}

	return 1;
}
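Stripped of the flag machinery, the CFS_FAIL_SKIP branch is just a "let the first N hits pass, then start failing" counter. A self-contained sketch of that idiom:

static atomic_t hits = ATOMIC_INIT(0);

/* returns true once more than @skip callers have passed through */
static bool should_fail_after(int skip)
{
	return atomic_inc_return(&hits) > skip;
}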
Exemplo n.º 24
0
static int appledisplay_probe(struct usb_interface *iface,
                              const struct usb_device_id *id)
{
    struct appledisplay *pdata;
    struct usb_device *udev = interface_to_usbdev(iface);
    struct usb_host_interface *iface_desc;
    struct usb_endpoint_descriptor *endpoint;
    int int_in_endpointAddr = 0;
    int i, retval = -ENOMEM, brightness;
    char bl_name[20];

    /* set up the endpoint information */
    /* use only the first interrupt-in endpoint */
    iface_desc = iface->cur_altsetting;
    for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
        endpoint = &iface_desc->endpoint[i].desc;
        if (!int_in_endpointAddr && usb_endpoint_is_int_in(endpoint)) {
            /* we found an interrupt in endpoint */
            int_in_endpointAddr = endpoint->bEndpointAddress;
            break;
        }
    }
    if (!int_in_endpointAddr) {
        err("Could not find int-in endpoint");
        return -EIO;
    }

    /* allocate memory for our device state and initialize it */
    pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
    if (!pdata) {
        retval = -ENOMEM;
        err("Out of memory");
        goto error;
    }

    pdata->udev = udev;

    spin_lock_init(&pdata->lock);
    INIT_DELAYED_WORK(&pdata->work, appledisplay_work);

    /* Allocate buffer for control messages */
    pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
    if (!pdata->msgdata) {
        retval = -ENOMEM;
        err("appledisplay: Allocating buffer for control messages "
            "failed");
        goto error;
    }

    /* Allocate interrupt URB */
    pdata->urb = usb_alloc_urb(0, GFP_KERNEL);
    if (!pdata->urb) {
        retval = -ENOMEM;
        err("appledisplay: Allocating URB failed");
        goto error;
    }

    /* Allocate buffer for interrupt data */
    pdata->urbdata = usb_buffer_alloc(pdata->udev, ACD_URB_BUFFER_LEN,
                                      GFP_KERNEL, &pdata->urb->transfer_dma);
    if (!pdata->urbdata) {
        retval = -ENOMEM;
        err("appledisplay: Allocating URB buffer failed");
        goto error;
    }

    /* Configure interrupt URB */
    usb_fill_int_urb(pdata->urb, udev,
                     usb_rcvintpipe(udev, int_in_endpointAddr),
                     pdata->urbdata, ACD_URB_BUFFER_LEN, appledisplay_complete,
                     pdata, 1);
    if (usb_submit_urb(pdata->urb, GFP_KERNEL)) {
        retval = -EIO;
        err("appledisplay: Submitting URB failed");
        goto error;
    }

    /* Register backlight device */
    snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
             atomic_inc_return(&count_displays) - 1);
    pdata->bd = backlight_device_register(bl_name, NULL, pdata,
                                          &appledisplay_bl_data);
    if (IS_ERR(pdata->bd)) {
        err("appledisplay: Backlight registration failed");
        retval = PTR_ERR(pdata->bd);
        goto error;
    }

    pdata->bd->props.max_brightness = 0xff;

    /* Try to get brightness */
    brightness = appledisplay_bl_get_brightness(pdata->bd);

    if (brightness < 0) {
        retval = brightness;
        err("appledisplay: Error while getting initial brightness: %d", retval);
        goto error;
    }

    /* Set brightness in backlight device */
    pdata->bd->props.brightness = brightness;

    /* save our data pointer in the interface device */
    usb_set_intfdata(iface, pdata);

    printk(KERN_INFO "appledisplay: Apple Cinema Display connected\n");

    return 0;

error:
    if (pdata) {
        if (pdata->urb) {
            usb_kill_urb(pdata->urb);
            if (pdata->urbdata)
                usb_buffer_free(pdata->udev, ACD_URB_BUFFER_LEN,
                                pdata->urbdata, pdata->urb->transfer_dma);
            usb_free_urb(pdata->urb);
        }
        if (pdata->bd && !IS_ERR(pdata->bd))
            backlight_device_unregister(pdata->bd);
        kfree(pdata->msgdata);
    }
    usb_set_intfdata(iface, NULL);
    kfree(pdata);
    return retval;
}
Exemplo n.º 25
0
int rtlx_open(int index, int can_sleep)
{
	struct rtlx_info **p;
	struct rtlx_channel *chan;
	enum rtlx_state state;
	int ret = 0;

	if (index >= RTLX_CHANNELS) {
		printk(KERN_DEBUG "rtlx_open index out of range\n");
		return -ENOSYS;
	}

	if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
		printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
		       index);
		ret = -EBUSY;
		goto out_fail;
	}

	if (rtlx == NULL) {
		if ((p = vpe_get_shared(tclimit)) == NULL) {
		    if (can_sleep) {
			__wait_event_interruptible(channel_wqs[index].lx_queue,
				(p = vpe_get_shared(tclimit)), ret);
			if (ret)
				goto out_fail;
		    } else {
			printk(KERN_DEBUG "No SP program loaded, and device "
					"opened with O_NONBLOCK\n");
			ret = -ENOSYS;
			goto out_fail;
		    }
		}

		smp_rmb();
		if (*p == NULL) {
			if (can_sleep) {
				DEFINE_WAIT(wait);

				for (;;) {
					prepare_to_wait(
						&channel_wqs[index].lx_queue,
						&wait, TASK_INTERRUPTIBLE);
					smp_rmb();
					if (*p != NULL)
						break;
					if (!signal_pending(current)) {
						schedule();
						continue;
					}
					ret = -ERESTARTSYS;
					goto out_fail;
				}
				finish_wait(&channel_wqs[index].lx_queue, &wait);
			} else {
				pr_err(" *vpe_get_shared is NULL. "
				       "Has an SP program been loaded?\n");
				ret = -ENOSYS;
				goto out_fail;
			}
		}

		if ((unsigned int)*p < KSEG0) {
			printk(KERN_WARNING "vpe_get_shared returned an "
			       "invalid pointer maybe an error code %d\n",
			       (int)*p);
			ret = -ENOSYS;
			goto out_fail;
		}

		if ((ret = rtlx_init(*p)) < 0)
			goto out_ret;
	}

	chan = &rtlx->channel[index];

	state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
	if (state == RTLX_STATE_OPENED) {
		ret = -EBUSY;
		goto out_fail;
	}

out_fail:
	smp_mb();
	atomic_dec(&channel_wqs[index].in_open);
	smp_mb();

out_ret:
	return ret;
}
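Note that the success path deliberately falls through to out_fail: in_open only serializes the open sequence itself, while the "already opened" state lives in chan->lx_state. Reduced to its core, the guard looks like this (a hedged sketch, not the driver's code):

static atomic_t in_open = ATOMIC_INIT(0);

static int my_open(void)
{
	int ret = 0;

	if (atomic_inc_return(&in_open) > 1) {	/* another open is in flight */
		ret = -EBUSY;
		goto out;
	}
	/* ... one-time setup, excluded from races ... */
out:
	atomic_dec(&in_open);	/* both paths drop the guard */
	return ret;
}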
Exemplo n.º 26
0
static int __devinit cx18_probe(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	int retval = 0;
	int i;
	u32 devtype;
	struct cx18 *cx;

	/* claim the next (0-based) instance number */
	i = atomic_inc_return(&cx18_instance) - 1;
	if (i >= CX18_MAX_CARDS) {
		printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
		       "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
		return -ENOMEM;
	}

	cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
	if (cx == NULL) {
		printk(KERN_ERR "cx18: cannot manage card %d, out of memory\n",
		       i);
		return -ENOMEM;
	}
	cx->pci_dev = pci_dev;
	cx->instance = i;

	retval = v4l2_device_register(&pci_dev->dev, &cx->v4l2_dev);
	if (retval) {
		printk(KERN_ERR "cx18: v4l2_device_register of card %d failed"
		       "\n", cx->instance);
		kfree(cx);
		return retval;
	}
	snprintf(cx->v4l2_dev.name, sizeof(cx->v4l2_dev.name), "cx18-%d",
		 cx->instance);
	CX18_INFO("Initializing card %d\n", cx->instance);

	cx18_process_options(cx);
	if (cx->options.cardtype == -1) {
		retval = -ENODEV;
		goto err;
	}

	retval = cx18_init_struct1(cx);
	if (retval)
		goto err;

	CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);

	/* PCI device setup */
	retval = cx18_setup_pci(cx, pci_dev, pci_id);
	if (retval != 0)
		goto free_workqueues;

	/* map the encoder's io memory */
	CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
		   cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
	cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
				       CX18_MEM_SIZE);
	if (!cx->enc_mem) {
		CX18_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
		CX18_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
		retval = -ENOMEM;
		goto free_mem;
	}
	cx->reg_mem = cx->enc_mem + CX18_REG_OFFSET;
	devtype = cx18_read_reg(cx, 0xC72028);
	switch (devtype & 0xff000000) {
	case 0xff000000:
		CX18_INFO("cx23418 revision %08x (A)\n", devtype);
		break;
	case 0x01000000:
		CX18_INFO("cx23418 revision %08x (B)\n", devtype);
		break;
	default:
		CX18_INFO("cx23418 revision %08x (Unknown)\n", devtype);
		break;
	}

	cx18_init_power(cx, 1);
	cx18_init_memory(cx);

	cx->scb = (struct cx18_scb __iomem *)(cx->enc_mem + SCB_OFFSET);
	cx18_init_scb(cx);

	cx18_gpio_init(cx);

	/* register the integrated A/V decoder subdevice */
	retval = cx18_av_probe(cx);
	if (retval) {
		CX18_ERR("Could not register A/V decoder subdevice\n");
		goto free_map;
	}
	cx18_call_hw(cx, CX18_HW_418_AV, core, init, 0);

	/* optional GPIO reset controller subdevice */
	if (cx->card->hw_all & CX18_HW_GPIO_RESET_CTRL) {
		if (cx18_gpio_register(cx, CX18_HW_GPIO_RESET_CTRL) != 0)
			CX18_WARN("Could not register GPIO reset controller"
				  "subdevice; proceeding anyway.\n");
		else
			cx->hw_flags |= CX18_HW_GPIO_RESET_CTRL;
	}

	/* bring up the i2c buses */
	CX18_DEBUG_INFO("activating i2c...\n");
	retval = init_cx18_i2c(cx);
	if (retval) {
		CX18_ERR("Could not initialize i2c\n");
		goto free_map;
	}

	if (cx->card->hw_all & CX18_HW_TVEEPROM) {
		/* read the card configuration from the EEPROM */
		cx18_process_eeprom(cx);
	}
	if (cx->card->comment)
		CX18_INFO("%s", cx->card->comment);
	if (cx->card->v4l2_capabilities == 0) {
		retval = -ENODEV;
		goto free_i2c;
	}
	cx18_init_memory(cx);
	cx18_init_scb(cx);

	/* register the interrupt handler */
	retval = request_irq(cx->pci_dev->irq, cx18_irq_handler,
			     IRQF_SHARED | IRQF_DISABLED,
			     cx->v4l2_dev.name, (void *)cx);
	if (retval) {
		CX18_ERR("Failed to register irq %d\n", retval);
		goto free_i2c;
	}

	if (cx->std == 0)
		cx->std = V4L2_STD_NTSC_M;

	if (cx->options.tuner == -1) {
		for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
			if ((cx->std & cx->card->tuners[i].std) == 0)
				continue;
			cx->options.tuner = cx->card->tuners[i].tuner;
			break;
		}
	}
	/* if no tuner matched the preferred std, fall back to the first tuner */
	if (cx->options.tuner == -1 && cx->card->tuners[0].std) {
		cx->std = cx->card->tuners[0].std;
		if (cx->std & V4L2_STD_PAL)
			cx->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
		else if (cx->std & V4L2_STD_NTSC)
			cx->std = V4L2_STD_NTSC_M;
		else if (cx->std & V4L2_STD_SECAM)
			cx->std = V4L2_STD_SECAM_L;
		cx->options.tuner = cx->card->tuners[0].tuner;
	}
	if (cx->options.radio == -1)
		cx->options.radio = (cx->card->radio_input.audio_type != 0);

	/* finish setting up the remaining per-card state */
	cx18_init_struct2(cx);

	cx18_init_subdevs(cx);

	if (cx->std & V4L2_STD_525_60)
		cx->is_60hz = 1;
	else
		cx->is_50hz = 1;

	cx->params.video_gop_size = cx->is_60hz ? 15 : 12;

	if (cx->options.radio > 0)
		cx->v4l2_cap |= V4L2_CAP_RADIO;

	if (cx->options.tuner > -1) {
		struct tuner_setup setup;

		setup.addr = ADDR_UNSET;
		setup.type = cx->options.tuner;
		setup.mode_mask = T_ANALOG_TV;  
		setup.tuner_callback = (setup.type == TUNER_XC2028) ?
			cx18_reset_tuner_gpio : NULL;
		cx18_call_all(cx, tuner, s_type_addr, &setup);
		if (setup.type == TUNER_XC2028) {
			static struct xc2028_ctrl ctrl = {
				.fname = XC2028_DEFAULT_FIRMWARE,
				.max_len = 64,
			};
			struct v4l2_priv_tun_config cfg = {
				.tuner = cx->options.tuner,
				.priv = &ctrl,
			};
			cx18_call_all(cx, tuner, s_config, &cfg);
		}
	}

	/* the tuner standard is now fixed */
	cx->tuner_std = cx->std;

	retval = cx18_streams_setup(cx);
	if (retval) {
		CX18_ERR("Error %d setting up streams\n", retval);
		goto free_irq;
	}
	retval = cx18_streams_register(cx);
	if (retval) {
		CX18_ERR("Error %d registering devices\n", retval);
		goto free_streams;
	}

	CX18_INFO("Initialized card: %s\n", cx->card_name);
	return 0;

free_streams:
	cx18_streams_cleanup(cx, 1);
free_irq:
	free_irq(cx->pci_dev->irq, (void *)cx);
free_i2c:
	exit_cx18_i2c(cx);
free_map:
	cx18_iounmap(cx);
free_mem:
	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
free_workqueues:
	destroy_workqueue(cx->in_work_queue);
	destroy_workqueue(cx->out_work_queue);
err:
	if (retval == 0)
		retval = -ENODEV;
	CX18_ERR("Error %d on initialization\n", retval);

	v4l2_device_unregister(&cx->v4l2_dev);
	kfree(cx);
	return retval;
}

int cx18_init_on_first_open(struct cx18 *cx)
{
	int video_input;
	int fw_retry_count = 3;
	struct v4l2_frequency vf;
	struct cx18_open_id fh;

	fh.cx = cx;

	if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
		return -ENXIO;

	if (test_and_set_bit(CX18_F_I_INITED, &cx->i_flags))
		return 0;

	while (--fw_retry_count > 0) {
		/* load and boot the firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}
	set_bit(CX18_F_I_LOADED_FW, &cx->i_flags);

	/* exercise the encoder once: start, reset the audio input, stop */
	cx18_vapi(cx, CX18_APU_START, 2, CX18_APU_ENCODING_METHOD_MPEG|0xb9, 0);
	cx18_vapi(cx, CX18_APU_RESETAI, 0);
	cx18_vapi(cx, CX18_APU_STOP, 1, CX18_APU_ENCODING_METHOD_MPEG);

	fw_retry_count = 3;
	while (--fw_retry_count > 0) {
		/* reload the firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}

	/* repeat the start / reset-AI / stop sequence */
	cx18_vapi(cx, CX18_APU_START, 2, CX18_APU_ENCODING_METHOD_MPEG|0xb9, 0);
	cx18_vapi(cx, CX18_APU_RESETAI, 0);
	cx18_vapi(cx, CX18_APU_STOP, 1, CX18_APU_ENCODING_METHOD_MPEG);

	/* have the A/V decoder load its firmware as well */
	v4l2_subdev_call(cx->sd_av, core, load_fw);

	vf.tuner = 0;
	vf.type = V4L2_TUNER_ANALOG_TV;
	vf.frequency = 6400;	/* the tuner 'baseline' frequency */

	/* set an initial frequency that matches the standard */
	if (cx->std == V4L2_STD_NTSC_M_JP)
		vf.frequency = 1460;	/* ~91.25 MHz */
	else if (cx->std & V4L2_STD_NTSC_M)
		vf.frequency = 1076;	/* ~67.25 MHz */

	video_input = cx->active_input;
	cx->active_input++;	/* force an input update */
	cx18_s_input(NULL, &fh, video_input);

	cx->std++;		/* force a full standard initialization */
	cx18_s_std(NULL, &fh, &cx->tuner_std);
	cx18_s_frequency(NULL, &fh, &vf);
	return 0;
}

static void cx18_cancel_in_work_orders(struct cx18 *cx)
{
	int i;
	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++)
		cancel_work_sync(&cx->in_work_order[i].work);
}
Exemplo n.º 27
0
/*
 * Debug: dump command.
 */
void ft_cmd_dump(struct scst_cmd *cmd, const char *caller)
{
	static atomic_t serial;
	struct ft_cmd *fcmd;
	struct fc_frame_header *fh;
	char prefix[30];
	char buf[150];

	if (!(ft_debug_logging & FT_DEBUG_IO))
		return;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	fh = fc_frame_header_get(fcmd->req_frame);
	snprintf(prefix, sizeof(prefix), FT_MODULE ": cmd %2x",
		atomic_inc_return(&serial) & 0xff);

	pr_info("%s %s oid %x oxid %x resp_len %u\n",
		prefix, caller, ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
		scst_cmd_get_resp_data_len(cmd));
	pr_info("%s scst_cmd %p wlen %u rlen %u\n",
		prefix, cmd, fcmd->write_data_len, fcmd->read_data_len);
	pr_info("%s exp_dir %x exp_xfer_len %d exp_in_len %d\n",
		prefix, cmd->expected_data_direction,
		cmd->expected_transfer_len, cmd->expected_out_transfer_len);
	pr_info("%s dir %x data_len %lld bufflen %d out_bufflen %d\n",
		prefix, cmd->data_direction, cmd->data_len,
		cmd->bufflen, cmd->out_bufflen);
	pr_info("%s sg_cnt reg %d in %d tgt %d tgt_in %d\n",
		prefix, cmd->sg_cnt, cmd->out_sg_cnt,
		cmd->tgt_i_sg_cnt, cmd->tgt_out_sg_cnt);

	buf[0] = '\0';
	if (cmd->sent_for_exec)
		ft_cmd_flag(buf, sizeof(buf), "sent");
	if (cmd->completed)
		ft_cmd_flag(buf, sizeof(buf), "comp");
	if (cmd->ua_ignore)
		ft_cmd_flag(buf, sizeof(buf), "ua_ign");
	if (cmd->atomic)
		ft_cmd_flag(buf, sizeof(buf), "atom");
	if (cmd->double_ua_possible)
		ft_cmd_flag(buf, sizeof(buf), "dbl_ua_poss");
	if (cmd->is_send_status)
		ft_cmd_flag(buf, sizeof(buf), "send_stat");
	if (cmd->retry)
		ft_cmd_flag(buf, sizeof(buf), "retry");
	if (cmd->internal)
		ft_cmd_flag(buf, sizeof(buf), "internal");
	if (cmd->unblock_dev)
		ft_cmd_flag(buf, sizeof(buf), "unblock_dev");
	if (cmd->cmd_hw_pending)
		ft_cmd_flag(buf, sizeof(buf), "hw_pend");
	if (cmd->tgt_need_alloc_data_buf)
		ft_cmd_flag(buf, sizeof(buf), "tgt_need_alloc");
	if (cmd->tgt_i_data_buf_alloced)
		ft_cmd_flag(buf, sizeof(buf), "tgt_i_alloced");
	if (cmd->dh_data_buf_alloced)
		ft_cmd_flag(buf, sizeof(buf), "dh_alloced");
	if (cmd->expected_values_set)
		ft_cmd_flag(buf, sizeof(buf), "exp_val");
	if (cmd->sg_buff_modified)
		ft_cmd_flag(buf, sizeof(buf), "sg_buf_mod");
	if (cmd->preprocessing_only)
		ft_cmd_flag(buf, sizeof(buf), "pre_only");
	if (cmd->sn_set)
		ft_cmd_flag(buf, sizeof(buf), "sn_set");
	if (cmd->hq_cmd_inced)
		ft_cmd_flag(buf, sizeof(buf), "hq_cmd_inc");
	if (cmd->set_sn_on_restart_cmd)
		ft_cmd_flag(buf, sizeof(buf), "set_sn_on_restart");
	if (cmd->no_sgv)
		ft_cmd_flag(buf, sizeof(buf), "no_sgv");
	if (cmd->may_need_dma_sync)
		ft_cmd_flag(buf, sizeof(buf), "dma_sync");
	if (cmd->out_of_sn)
		ft_cmd_flag(buf, sizeof(buf), "oo_sn");
	if (cmd->inc_expected_sn_on_done)
		ft_cmd_flag(buf, sizeof(buf), "inc_sn_exp");
	if (cmd->done)
		ft_cmd_flag(buf, sizeof(buf), "done");
	if (cmd->finished)
		ft_cmd_flag(buf, sizeof(buf), "fin");

	pr_info("%s flags %s\n", prefix, buf);
	pr_info("%s lun %lld sn %d tag %lld cmd_flags %lx\n",
		prefix, cmd->lun, cmd->sn, cmd->tag, cmd->cmd_flags);
	pr_info("%s tgt_sn %d op_flags %x op %s\n",
		prefix, cmd->tgt_sn, cmd->op_flags, cmd->op_name);
	pr_info("%s status %x msg_status %x "
		"host_status %x driver_status %x\n",
		prefix, cmd->status, cmd->msg_status,
		cmd->host_status, cmd->driver_status);
	pr_info("%s cdb_len %d\n", prefix, cmd->cdb_len);
	snprintf(buf, sizeof(buf), "%s cdb ", prefix);
	print_hex_dump(KERN_INFO, buf, DUMP_PREFIX_NONE,
		16, 4, cmd->cdb, SCST_MAX_CDB_SIZE, 0);
}
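The static serial counter here is a free-running debug tag: atomic_inc_return(&serial) & 0xff gives every dump a small wrapping id so interleaved log lines from concurrent commands can be told apart. The same idiom in isolation:

static atomic_t dump_serial = ATOMIC_INIT(0);

static u8 next_dump_tag(void)
{
	return atomic_inc_return(&dump_serial) & 0xff;	/* 1..255, 0, 1, ... */
}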
Exemplo n.º 28
0
static __be64 form_tid(u32 hi_tid)
{
	static atomic_t tid;
	return cpu_to_be64((((u64) hi_tid) << 32) |
			   ((u32) atomic_inc_return(&tid)));
}
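form_tid() packs a caller-chosen high word over a wrapping 32-bit counter, so ids stay unique per hi_tid for 2^32 calls. A hedged usage sketch, where agent and mad_hdr are assumed names rather than fields from this snippet:

	mad_hdr->tid = form_tid(agent->hi_tid);	/* __be64, ready for the wire */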
Exemplo n.º 29
0
Arquivo: skbuff.c Projeto: mhei/linux
/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
    const void *here = __builtin_return_address(0);
    int n = atomic_inc_return(select_skb_count(op));
    trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}
Exemplo n.º 30
0
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
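The final atomic_inc_return(&blk_probes_ref) == 1 registers the block tracepoints only when the first trace comes up. A hedged sketch of the symmetric teardown, modeled on what blk_trace_cleanup() does: the last user to drop the reference unregisters them.

static void my_blk_trace_teardown(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}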