Example #1
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const struct qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	const size_t size = 2 * sizeof(struct qm_sg_entry);
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);

	fd = &msg->ern.fd;

	if (qm_fd_compound != fd->format) {
		dev_err(qidev, "Non compound FD from CAAM\n");
		return;
	}

	drv_req = ((struct caam_drv_req *)phys_to_virt(fd->addr));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for caam response\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, fd->addr,
			 size, DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, -EIO);
}
Example #2
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
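A minimal, hypothetical driver-side sketch of how such a per-CPU dev id gets registered in the first place (my_evt, my_evt_handler and my_evt_setup are illustrative names, not taken from the kernel sources above):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

/* hypothetical per-CPU device state */
struct my_evt {
	unsigned long count;
};

static DEFINE_PER_CPU(struct my_evt, my_evt);

static irqreturn_t my_evt_handler(int irq, void *dev_id)
{
	/* handle_percpu_devid_irq() already resolved this CPU's instance */
	struct my_evt *evt = dev_id;

	evt->count++;
	return IRQ_HANDLED;
}

static int my_evt_setup(int irq)
{
	int err;

	/*
	 * &my_evt is the per-CPU base address; it is stored in
	 * action->percpu_dev_id and resolved per CPU with raw_cpu_ptr()
	 * by the flow handler above.
	 */
	err = request_percpu_irq(irq, my_evt_handler, "my-evt", &my_evt);
	if (err)
		return err;

	/* unmask this CPU's copy; other CPUs do the same when they come online */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}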
Example #3
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);

	fd = &msg->ern.fd;

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, -EIO);
}
Example #4
static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
	unsigned long *begin, *end;
	struct pt_regs *regs;
	unsigned k;

	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);

	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		end   = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
		begin = end - (exception_stack_sizes[k] / sizeof(long));
		regs  = (struct pt_regs *)end - 1;

		if (stack <= begin || stack >= end)
			continue;

		info->type	= STACK_TYPE_EXCEPTION + k;
		info->begin	= begin;
		info->end	= end;
		info->next_sp	= (unsigned long *)regs->sp;

		return true;
	}

	return false;
}
Example #5
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
Example #6
File: qi.c Project: AlexShiLucky/linux
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;
	status = be32_to_cpu(fd->status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err(qidev, "Error: %#x in CAAM response FD\n",
				status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for caam response\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}
Example #7
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;
	int base = hrtimer_clockid_to_base(which_clock);

	cpu_base = raw_cpu_ptr(&hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

	return 0;
}
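In kernels that still provide hrtimer_get_res() (later kernels dropped it in favour of the global hrtimer_resolution), a hedged caller sketch could look like this; my_report_res is an illustrative name:

#include <linux/hrtimer.h>
#include <linux/printk.h>
#include <linux/time.h>

/* print the hrtimer resolution of CLOCK_MONOTONIC */
static void my_report_res(void)
{
	struct timespec tp;

	if (!hrtimer_get_res(CLOCK_MONOTONIC, &tp))
		pr_info("hrtimer resolution: %ld.%09ld s\n",
			(long)tp.tv_sec, tp.tv_nsec);
}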
Example #8
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
					struct qman_fq *rsp_fq,
					const struct qm_dqrr_entry *dqrr)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	const size_t size = 2 * sizeof(struct qm_sg_entry);
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);

	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;
	if (unlikely(fd->status))
		dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);

	if (unlikely(qm_fd_compound != fd->format)) {
		dev_err(qidev, "Non compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for caam response\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, fd->addr,
			 size, DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, fd->status);

	return qman_cb_dqrr_consume;
}
Example #9
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}
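The comment relies on this callback running on every CPU with interrupts disabled; a hedged sketch of one way a caller could arrange that after changing sample_period (my_restart_all_watchdogs is an illustrative name, not necessarily how the watchdog code actually invokes it):

#include <linux/smp.h>

/*
 * Push the new period to every CPU's watchdog hrtimer.  on_each_cpu() runs
 * the callback in IPI context (and with local IRQs off on the calling CPU),
 * matching the assumption in restart_watchdog_hrtimer() above.
 */
static void my_restart_all_watchdogs(void)
{
	on_each_cpu(restart_watchdog_hrtimer, NULL, 1);
}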
Example #10
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}
Example #11
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}
Example #12
File: chip.c Project: 3null/linux
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
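Both variants of this handler rely on the same per-CPU pointer resolution; a small hedged sketch of what raw_cpu_ptr() gives you (my_data and my_show_percpu_pointers are illustrative):

#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, my_data);	/* illustrative per-CPU variable */

static void my_show_percpu_pointers(void)
{
	int *here, *there;

	preempt_disable();
	/*
	 * raw_cpu_ptr() turns the per-CPU base address into the running
	 * CPU's instance without the CONFIG_DEBUG_PREEMPT check that
	 * this_cpu_ptr() performs; in a hard-IRQ path like the handlers
	 * above, preemption is already off, so the raw variant suffices.
	 */
	here  = raw_cpu_ptr(&my_data);
	there = per_cpu_ptr(&my_data, smp_processor_id());
	WARN_ON(here != there);		/* same slot for the current CPU */
	preempt_enable();
}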
Example #13
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				  bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		res = register_cpu_notifier(&msm_timer_cpu_nb);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}

		/* Immediately configure the timer on the boot CPU */
		msm_local_timer_setup(raw_cpu_ptr(msm_evt));
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);
}
Example #14
swp_entry_t get_swap_page(void)
{
	swp_entry_t entry, *pentry;
	struct swap_slots_cache *cache;

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	entry.val = 0;
	if (check_cache_active()) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				pentry = &cache->slots[cache->cur++];
				entry = *pentry;
				pentry->val = 0;
				cache->nr--;
			} else {
				if (refill_swap_slots_cache(cache))
					goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			return entry;
	}

	get_swap_pages(1, &entry);

	return entry;
}
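The comment in get_swap_page() captures why raw_cpu_ptr() (and not this_cpu_ptr() or get_cpu_ptr()) is enough here: the task may migrate right after taking the pointer, but every access is serialized by a mutex. A generic, hypothetical sketch of that pattern (my_cache and my_take_one are made-up names):

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/types.h>

/*
 * hypothetical per-CPU cache whose consistency comes from a mutex rather
 * than from disabled preemption; cache->lock is mutex_init()ed for every
 * CPU during boot (not shown)
 */
struct my_cache {
	struct mutex lock;
	int nr;
};

static DEFINE_PER_CPU(struct my_cache, my_cache);

static bool my_take_one(void)
{
	struct my_cache *cache;
	bool got = false;

	/*
	 * As in get_swap_page(): we may be preempted and migrated right
	 * after raw_cpu_ptr() and end up on another CPU's cache, which is
	 * harmless because cache->lock serializes all accesses.  For
	 * lockless per-CPU data this would be a bug, and this_cpu_ptr() or
	 * get_cpu_ptr() should be used instead.
	 */
	cache = raw_cpu_ptr(&my_cache);

	mutex_lock(&cache->lock);
	if (cache->nr > 0) {
		cache->nr--;
		got = true;
	}
	mutex_unlock(&cache->lock);
	return got;
}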
Example #15
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
					u32 cookie, __be32 *saddr)
{
	struct ip_tunnel_dst *idst;
	struct dst_entry *dst;

	rcu_read_lock();
	idst = raw_cpu_ptr(t->dst_cache);
	dst = rcu_dereference(idst->dst);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	if (dst) {
		if (!dst->obsolete || dst->ops->check(dst, cookie)) {
			*saddr = idst->saddr;
		} else {
			tunnel_dst_reset(t);
			dst_release(dst);
			dst = NULL;
		}
	}
	rcu_read_unlock();
	return (struct rtable *)dst;
}
Example #16
File: hrtimer.c Project: AK101111/linux
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
Example #17
File: percpu_ida.c Project: krzk/linux
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	tags = raw_cpu_ptr(pool->tag_cpu);

	spin_lock_irqsave(&tags->lock, flags);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;

	if (nr_free == 1) {
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}
	spin_unlock_irqrestore(&tags->lock, flags);

	if (nr_free == pool->percpu_max_size) {
		spin_lock_irqsave(&pool->lock, flags);
		spin_lock(&tags->lock);

		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  pool->percpu_batch_size);

			wake_up(&pool->wait);
		}
		spin_unlock(&tags->lock);
		spin_unlock_irqrestore(&pool->lock, flags);
	}
}
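A hedged usage sketch for the tag pool API shown above, in kernels that provide lib/percpu_ida.c (my_pool, MY_NR_TAGS and the surrounding functions are illustrative):

#include <linux/percpu_ida.h>

#define MY_NR_TAGS	128		/* illustrative pool size */

static struct percpu_ida my_pool;

static int my_pool_setup(void)
{
	/* sets up the per-CPU freelists; valid tags are 0..MY_NR_TAGS-1 */
	return percpu_ida_init(&my_pool, MY_NR_TAGS);
}

/* e.g. from a command-completion interrupt: return the tag to the pool */
static void my_complete_cmd(unsigned tag)
{
	percpu_ida_free(&my_pool, tag);
}

static void my_pool_teardown(void)
{
	percpu_ida_destroy(&my_pool);
}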
Example #18
static noinline void tunnel_dst_set(struct ip_tunnel *t,
			   struct dst_entry *dst, __be32 saddr)
{
	__tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
}
Example #19
File: percpu_ida.c Project: krzk/linux
/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (as passed to
 * percpu_ida_init()), or -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
 *
 * @state indicates whether or not to wait until a free id is available: with
 * TASK_RUNNING the call never sleeps and returns -ENOSPC when no tag is free,
 * while a sleeping task state may block however long it takes until another
 * thread frees an id (same semantics as a mempool).
 *
 * Will not return -ENOSPC if passed TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE,
 * though the latter may still return -ERESTARTSYS when a signal is pending.
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag = -ENOSPC;

	tags = raw_cpu_ptr(pool->tag_cpu);
	spin_lock_irqsave(&tags->lock, flags);

	/* Fastpath */
	if (likely(tags->nr_free)) {
		tag = tags->freelist[--tags->nr_free];
		spin_unlock_irqrestore(&tags->lock, flags);
		return tag;
	}
	spin_unlock_irqrestore(&tags->lock, flags);

	while (1) {
		spin_lock_irqsave(&pool->lock, flags);
		tags = this_cpu_ptr(pool->tag_cpu);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		if (state != TASK_RUNNING)
			prepare_to_wait(&pool->wait, &wait, state);

		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock_irqrestore(&pool->lock, flags);

		if (tag >= 0 || state == TASK_RUNNING)
			break;

		if (signal_pending_state(state, current)) {
			tag = -ERESTARTSYS;
			break;
		}

		schedule();
	}
	if (state != TASK_RUNNING)
		finish_wait(&pool->wait, &wait);

	return tag;
}
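And the matching allocation side, hedged the same way (my_try_get_tag and my_get_tag are illustrative wrappers):

#include <linux/errno.h>
#include <linux/percpu_ida.h>
#include <linux/sched.h>

/* non-blocking: returns -ENOSPC immediately when the pool is exhausted */
static int my_try_get_tag(struct percpu_ida *pool)
{
	return percpu_ida_alloc(pool, TASK_RUNNING);
}

/*
 * blocking: sleeps until another CPU frees a tag; with TASK_INTERRUPTIBLE a
 * pending signal makes it return -ERESTARTSYS, which the caller must handle
 */
static int my_get_tag(struct percpu_ida *pool)
{
	int tag = percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);

	if (tag == -ERESTARTSYS)
		return -EINTR;	/* illustrative policy for interrupted waits */
	return tag;
}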