Example #1
static bool bts_buffer_is_full(struct bts_buffer *buf, struct bts_ctx *bts)
{
	if (buf->snapshot)
		return false;

	if (local_read(&buf->data_size) >= bts->handle.size ||
	    bts->handle.size - local_read(&buf->data_size) < BTS_RECORD_SIZE)
		return true;

	return false;
}
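The space check above pairs with the producer side: when the hardware advances the BTS write pointer, the delta is folded into data_size with local_add(). Below is a minimal sketch of that accounting, loosely following the driver's bts_update() path; bts_account() is a hypothetical name used for illustration.

static void bts_account(struct bts_buffer *buf, unsigned long old_head,
			unsigned long new_head)
{
	/* Remember how far the hardware got. */
	local_set(&buf->head, new_head);

	/*
	 * Outside of snapshot mode, grow the payload size that
	 * bts_buffer_is_full() compares against the AUX handle.
	 */
	if (!buf->snapshot)
		local_add(new_head - old_head, &buf->data_size);
}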
Example #2
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   READ ->data_tail			READ ->data_head
	 *   smp_mb()	(A)			smp_rmb()	(C)
	 *   WRITE $data			READ $data
	 *   smp_wmb()	(B)			smp_mb()	(D)
	 *   STORE ->data_head			WRITE ->data_tail
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * I don't think A needs to be a full barrier because we won't in fact
	 * write data until we see the store from userspace. So we simply don't
	 * issue the data WRITE until we observe it. Be conservative for now.
	 *
	 * OTOH, D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
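The right-hand column of the barrier diagram above is userspace. Below is a minimal sketch of that consumer side, with C11 fences standing in for the kernel barrier macros; consume() and parse_record() are hypothetical names, and wrap handling at the buffer edge is omitted.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <linux/perf_event.h>	/* struct perf_event_mmap_page, perf_event_header */

/* Hypothetical helper: decode one record and return its size in bytes. */
static uint64_t parse_record(const char *rec)
{
	struct perf_event_header hdr;

	memcpy(&hdr, rec, sizeof(hdr));	/* every record starts with a header */
	return hdr.size;
}

static void consume(volatile struct perf_event_mmap_page *pg,
		    const char *data, uint64_t mask)
{
	uint64_t head = pg->data_head;		/* READ ->data_head */
	uint64_t tail = pg->data_tail;

	/* (C): smp_rmb() -- order the head READ before the data READs. */
	atomic_thread_fence(memory_order_acquire);

	while (tail != head)			/* READ $data */
		tail += parse_record(&data[tail & mask]);

	/* (D): smp_mb() -- separate the data READs from the tail WRITE. */
	atomic_thread_fence(memory_order_seq_cst);
	pg->data_tail = tail;			/* WRITE ->data_tail */
}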
Example #3
/* Take data received from hardware, and send it out the tty */
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
			unsigned int length)
{
	struct tty_struct *linux_tty;
	int work = 0;

	mutex_lock(&tty->ipw_tty_mutex);
	linux_tty = tty->linux_tty;
	if (linux_tty == NULL) {
		mutex_unlock(&tty->ipw_tty_mutex);
		return;
	}

	if (!local_read(&tty->open_count)) {
		mutex_unlock(&tty->ipw_tty_mutex);
		return;
	}
	mutex_unlock(&tty->ipw_tty_mutex);

	work = tty_insert_flip_string(linux_tty, data, length);

	if (work != length)
		printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
				": %d chars not inserted to flip buffer!\n",
				length - work);

	/*
	 * This may sleep if ->low_latency is set
	 */
	if (work)
		tty_flip_buffer_push(linux_tty);
}
Example #4
static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
{
	int minor = linux_tty->index;
	struct ipw_tty *tty = get_tty(minor);

	if (!tty)
		return -ENODEV;

	mutex_lock(&tty->ipw_tty_mutex);

	if (tty->closing) {
		mutex_unlock(&tty->ipw_tty_mutex);
		return -ENODEV;
	}
	if (local_read(&tty->open_count) == 0)
		tty->tx_bytes_queued = 0;

	local_inc(&tty->open_count);

	tty->linux_tty = linux_tty;
	linux_tty->driver_data = tty;
	linux_tty->low_latency = 1;

	if (tty->tty_type == TTYTYPE_MODEM)
		ipwireless_ppp_open(tty->network);

	mutex_unlock(&tty->ipw_tty_mutex);

	return 0;
}
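The count taken with local_inc() above is dropped on the close side, and teardown only happens when the last opener goes away. Below is a rough sketch of that counterpart, modeled on the driver's do_ipw_close() with details trimmed, so treat it as illustrative rather than verbatim.

static void do_ipw_close(struct ipw_tty *tty)
{
	local_dec(&tty->open_count);

	/* Only the final close tears the channel down. */
	if (local_read(&tty->open_count) == 0) {
		struct tty_struct *linux_tty = tty->linux_tty;

		if (linux_tty != NULL) {
			tty->linux_tty = NULL;
			linux_tty->driver_data = NULL;

			if (tty->tty_type == TTYTYPE_MODEM)
				ipwireless_ppp_close(tty->network);
		}
	}
}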
Example #5
static ssize_t port_select_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val, stmsper;
	int ret = 0;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->stmspscr = val;

	if (local_read(&drvdata->mode)) {
		CS_UNLOCK(drvdata->base);
		/* Process as per ARM's TRM recommendation */
		stmsper = readl_relaxed(drvdata->base + STMSPER);
		writel_relaxed(0x0, drvdata->base + STMSPER);
		writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
		writel_relaxed(stmsper, drvdata->base + STMSPER);
		CS_LOCK(drvdata->base);
	}
	spin_unlock(&drvdata->spinlock);

	return size;
}
Example #6
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
		? ds->bts_buffer_base + thresh
		: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}
Example #7
int etm_get_trace_id(struct etm_drvdata *drvdata)
{
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata)
		goto out;

	if (!local_read(&drvdata->mode))
		return drvdata->traceid;

	pm_runtime_get_sync(drvdata->dev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

out:
	return trace_id;
}
Example #8
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
	int cpu = smp_processor_id();

	clear_softint(1 << irq);
	pcr_ops->write(PCR_PIC_PRIV);

	local_cpu_data().__nmi_count++;

	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;

	sum = kstat_irqs_cpu(0, cpu);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_usable) {
		write_pic(picl_value(nmi_hz));
		pcr_ops->write(pcr_enable);
	}
}
Example #9
static void etm_disable(struct coresight_device *csdev)
{
	u32 mode;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status.  As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = local_read(&drvdata->mode);

	switch (mode) {
	case CS_MODE_DISABLED:
		break;
	case CS_MODE_SYSFS:
		etm_disable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		etm_disable_perf(csdev);
		break;
	default:
		WARN_ON_ONCE(mode);
		return;
	}

	if (mode)
		local_set(&drvdata->mode, CS_MODE_DISABLED);
}
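The single-writer assumption stated in the comment above is established on the enable side, where the mode is claimed atomically before the hardware is touched. Below is a condensed sketch in the spirit of the driver's etm_enable(); the hardware programming and error paths are trimmed.

static int etm_enable(struct coresight_device *csdev, u32 mode)
{
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/* Claim the tracer; only one of sysFS/perf may own it at a time. */
	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/* Someone else is already using the tracer. */
	if (val)
		return -EBUSY;

	/* ... program the hardware for @mode here ... */

	return 0;
}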
Example #10
/*
 * Must be called before ipwireless_network_free().
 */
void ipwireless_tty_free(struct ipw_tty *tty)
{
	int j;
	struct ipw_network *network = ttys[tty->index]->network;

	for (j = tty->index; j < IPWIRELESS_PCMCIA_MINORS;
			j += IPWIRELESS_PCMCIA_MINOR_RANGE) {
		struct ipw_tty *ttyj = ttys[j];

		if (ttyj) {
			mutex_lock(&ttyj->ipw_tty_mutex);
			if (get_tty(j + ipw_tty_driver->minor_start) == ttyj)
				report_deregistering(ttyj);
			ttyj->closing = 1;
			if (ttyj->linux_tty != NULL) {
				mutex_unlock(&ttyj->ipw_tty_mutex);
				tty_hangup(ttyj->linux_tty);
				/* Wait till the tty_hangup has completed */
				flush_work_sync(&ttyj->linux_tty->hangup_work);
				/* FIXME: Exactly how is the tty object locked here
				   against a parallel ioctl etc */
				mutex_lock(&ttyj->ipw_tty_mutex);
			}
			while (local_read(&ttyj->open_count))
				do_ipw_close(ttyj);
			ipwireless_disassociate_network_ttys(network,
							     ttyj->channel_idx);
			tty_unregister_device(ipw_tty_driver, j);
			ttys[j] = NULL;
			mutex_unlock(&ttyj->ipw_tty_mutex);
			kfree(ttyj);
		}
	}
}
Example #11
static long stm_generic_set_options(struct stm_data *stm_data,
				    unsigned int master,
				    unsigned int channel,
				    unsigned int nr_chans,
				    unsigned long options)
{
	struct stm_drvdata *drvdata = container_of(stm_data,
						   struct stm_drvdata, stm);
	if (!(drvdata && local_read(&drvdata->mode)))
		return -EINVAL;

	if (channel >= drvdata->numsp)
		return -EINVAL;

	switch (options) {
	case STM_OPTION_GUARANTEED:
		set_bit(channel, drvdata->chs.guaranteed);
		break;

	case STM_OPTION_INVARIANT:
		clear_bit(channel, drvdata->chs.guaranteed);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
Example #12
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}
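Putting the pair together: every writer brackets its copy with get/put, and only the outer-most put, the one whose local_dec_and_test() reaches zero, publishes the head. Below is a schematic sketch; emit_record() is a hypothetical wrapper, and the real entry points are perf_output_begin()/perf_output_end().

static void emit_record(struct perf_output_handle *handle,
			const void *rec, unsigned int len)
{
	perf_output_get_handle(handle);	/* preempt off, rb->nest++ */

	/*
	 * Space was reserved by perf_output_begin(); an NMI landing in
	 * this window nests, and its put merely decrements rb->nest.
	 */
	memcpy(handle->addr, rec, len);

	perf_output_put_handle(handle);	/* outer-most put publishes head */
}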
Example #13
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}
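The nmi_touch flag tested in both watchdog ticks above is raised by code that stalls a CPU on purpose. Below is a hedged sketch of that setter, patterned on the touch_nmi_watchdog() of the same era and simplified.

void touch_nmi_watchdog(void)
{
	unsigned int cpu;

	/*
	 * Make every CPU's next watchdog tick skip the lockup test and
	 * reset its alert_counter.
	 */
	for_each_present_cpu(cpu)
		per_cpu(nmi_touch, cpu) = 1;

	touch_softlockup_watchdog();
}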
Example #14
static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (local_read(&dev->open_count) == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
Example #15
static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
				  unsigned int master,
				  unsigned int channel,
				  unsigned int packet,
				  unsigned int flags,
				  unsigned int size,
				  const unsigned char *payload)
{
	void __iomem *ch_addr;
	struct stm_drvdata *drvdata = container_of(stm_data,
						   struct stm_drvdata, stm);

	if (!(drvdata && local_read(&drvdata->mode)))
		return -EACCES;

	if (channel >= drvdata->numsp)
		return -EINVAL;

	ch_addr = stm_channel_addr(drvdata, channel);

	flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
	flags |= test_bit(channel, drvdata->chs.guaranteed) ?
			   STM_FLAG_GUARANTEED : 0;

	if (size > drvdata->write_bytes)
		size = drvdata->write_bytes;
	else
		size = rounddown_pow_of_two(size);

	switch (packet) {
	case STP_PACKET_FLAG:
		ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, flags);

		/*
		 * The generic STM core sets a size of '0' on flag packets.
		 * As such send a flag packet of size '1' and tell the
		 * core we did so.
		 */
		stm_send(ch_addr, payload, 1, drvdata->write_bytes);
		size = 1;
		break;

	case STP_PACKET_DATA:
		ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, flags);
		stm_send(ch_addr, payload, size,
				drvdata->write_bytes);
		break;

	default:
		return -ENOTSUPP;
	}

	return size;
}
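stm_send() above performs the actual MMIO: the payload lands on the channel address with a store whose width encodes the packet size. Below is a condensed sketch after the driver's helper; the unaligned fix-up is kept, and the 64-bit case is elided.

static void stm_send(void __iomem *addr, const void *data,
		     u32 size, u8 write_bytes)
{
	u8 payload[8];

	if (stm_addr_unaligned(data, write_bytes)) {
		memcpy(payload, data, size);
		data = payload;
	}

	switch (size) {
	case 4:
		writel_relaxed(*(u32 *)data, addr);
		break;
	case 2:
		writew_relaxed(*(u16 *)data, addr);
		break;
	case 1:
		writeb_relaxed(*(u8 *)data, addr);
		break;
	default:	/* 8-byte writes elided in this sketch */
		break;
	}
}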
Example #16
static int etm_dying_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (local_read(&etmdrvdata[cpu]->mode))
		etm_disable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}
Example #17
static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
{
	struct ipw_tty *tty = linux_tty->driver_data;

	if (!tty)
		return 0;

	if (!local_read(&tty->open_count))
		return 0;

	return tty->tx_bytes_queued;
}
Example #18
int intel_bts_interrupt(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err;

	if (!event || !bts->started)
		return 0;

	buf = perf_get_aux(&bts->handle);
	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (!buf || buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return 0;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return 1;

	err = bts_buffer_reset(buf, &bts->handle);
	if (err)
		perf_aux_output_end(&bts->handle, 0, false);

	return 1;
}
Example #19
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}
Example #20
static int ipw_tiocmget(struct tty_struct *linux_tty)
{
	struct ipw_tty *tty = linux_tty->driver_data;
	/* FIXME: Exactly how is the tty object locked here .. */

	if (!tty)
		return -ENODEV;

	if (!local_read(&tty->open_count))
		return -EINVAL;

	return get_control_lines(tty);
}
Example #21
static void etb_dump(struct etb_drvdata *drvdata)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
		etb_disable_hw(drvdata);
		etb_dump_hw(drvdata);
		etb_enable_hw(drvdata);
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "ETB dumped\n");
}
Example #22
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by local_dec_and_test() to order the rb->head read and this
	 * write.
	 */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update; rely on the (compiler)
	 * barrier in local_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
Example #23
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	long val;
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	val = local_read(&drvdata->mode);
	/* Don't interfere if operated from Perf */
	if (val == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (val == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
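In the driver these two routines bracket a buffer read through the TMC character device: prepare on open, unprepare on release. Below is a rough sketch of that pairing, with the fops plumbing condensed to its essentials; treat the details as illustrative.

static int tmc_open(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	int ret;

	ret = tmc_read_prepare_etb(drvdata);	/* stop capture, keep buffer */
	if (ret)
		return ret;

	nonseekable_open(inode, file);
	return 0;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	/* Re-arm the trace run, or free the buffer if tracing stopped. */
	return tmc_read_unprepare_etb(drvdata);
}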
Example #24
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
Example #25
static int etm_starting_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (!etmdrvdata[cpu]->os_unlock) {
		etm_os_unlock(etmdrvdata[cpu]);
		etmdrvdata[cpu]->os_unlock = true;
	}

	if (local_read(&etmdrvdata[cpu]->mode))
		etm_enable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}
Example #26
static int fpuemu_stat_get(void *data, u64 *val)
{
	int cpu;
	unsigned long sum = 0;

	for_each_online_cpu(cpu) {
		struct mips_fpu_emulator_stats *ps;
		local_t *pv;

		ps = &per_cpu(fpuemustats, cpu);
		pv = (void *)ps + (unsigned long)data;
		sum += local_read(pv);
	}
	*val = sum;
	return 0;
}
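The counters summed above are bumped on the emulator's fast path with local_inc(), one instance per CPU, which is what makes the lock-free cross-CPU sum safe. Below is a sketch of the update side, patterned on the MIPS_FPU_EMU_INC_STATS() macro; this is an illustrative expansion, not the verbatim header.

#define MIPS_FPU_EMU_INC_STATS(M)					\
do {									\
	preempt_disable();						\
	local_inc(&__get_cpu_var(fpuemustats).M);			\
	preempt_enable();						\
} while (0)

/* e.g. on entering the emulator: MIPS_FPU_EMU_INC_STATS(emulated); */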
Example #27
static ssize_t port_enable_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if (!local_read(&drvdata->mode)) {
		val = drvdata->stmsper;
	} else {
		spin_lock(&drvdata->spinlock);
		val = readl_relaxed(drvdata->base + STMSPER);
		spin_unlock(&drvdata->spinlock);
	}

	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
Example #28
static void ipw_hangup(struct tty_struct *linux_tty)
{
	struct ipw_tty *tty = linux_tty->driver_data;

	if (!tty)
		return;

	mutex_lock(&tty->ipw_tty_mutex);
	if (local_read(&tty->open_count) == 0) {
		mutex_unlock(&tty->ipw_tty_mutex);
		return;
	}

	do_ipw_close(tty);

	mutex_unlock(&tty->ipw_tty_mutex);
}
Example #29
static int ipw_write_room(struct tty_struct *linux_tty)
{
	struct ipw_tty *tty = linux_tty->driver_data;
	int room;

	/* FIXME: Exactly how is the tty object locked here .. */
	if (!tty)
		return -ENODEV;

	if (!local_read(&tty->open_count))
		return -EINVAL;

	room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
	if (room < 0)
		room = 0;

	return room;
}
Example #30
static int etm4_trace_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	if (!local_read(&drvdata->mode))
		return drvdata->trcid;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
	trace_id &= ETM_TRACEID_MASK;
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return trace_id;
}