Example #1
0
static void hangcheck_fire(unsigned long data)
{
	unsigned long long cur_tsc, tsc_diff;

	cur_tsc = ktime_get_ns();

	if (cur_tsc > hangcheck_tsc)
		tsc_diff = cur_tsc - hangcheck_tsc;
	else
		tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */

	if (tsc_diff > hangcheck_tsc_margin) {
		if (hangcheck_dump_tasks) {
			printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
			handle_sysrq('t');
#endif  /* CONFIG_MAGIC_SYSRQ */
		}
		if (hangcheck_reboot) {
			printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
			emergency_restart();
		} else {
			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
		}
	}
#if 0
	/*
	 * Enable to investigate delays in detail
	 */
	printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
			tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
#endif
	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
	hangcheck_tsc = ktime_get_ns();
}
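The hangcheck timer above reduces to a simple pattern: sample a monotonic nanosecond clock, compute the time elapsed since the previous sample, and compare the result against a margin. Below is a minimal userspace sketch of the same pattern, using POSIX clock_gettime(CLOCK_MONOTONIC) in place of ktime_get_ns(); the helper mono_ns() and the 3-second margin are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Monotonic nanosecond timestamp, analogous to ktime_get_ns(). */
static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	const uint64_t margin_ns = 3ULL * 1000000000ULL;	/* illustrative 3 s margin */
	uint64_t last = mono_ns();

	for (int i = 0; i < 5; i++) {
		sleep(1);			/* stand-in for the periodic timer */
		uint64_t now = mono_ns();
		uint64_t diff = now - last;	/* CLOCK_MONOTONIC never goes backwards */

		if (diff > margin_ns)
			printf("check fired %llu ns past the margin\n",
			       (unsigned long long)(diff - margin_ns));
		last = now;
	}
	return 0;
}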
Example #2
0
static int terminate_request(struct cros_ec_device *ec_dev)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_message msg;
	struct spi_transfer trans;
	int ret;

	/*
	 * Turn off CS, possibly adding a delay to ensure the rising edge
	 * doesn't come too soon after the end of the data.
	 */
	spi_message_init(&msg);
	memset(&trans, 0, sizeof(trans));
	trans.delay_usecs = ec_spi->end_of_msg_delay;
	spi_message_add_tail(&trans, &msg);

	ret = spi_sync(ec_spi->spi, &msg);

	/* Reset end-of-response timer */
	ec_spi->last_transfer_ns = ktime_get_ns();
	if (ret < 0) {
		dev_err(ec_dev->dev,
			"cs-deassert spi transfer failed: %d\n",
			ret);
	}

	return ret;
}
Example #3
0
void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_FORK;
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
Example #4
0
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_PTRACE;
	ev->event_data.ptrace.process_pid  = task->pid;
	ev->event_data.ptrace.process_tgid = task->tgid;
	if (ptrace_id == PTRACE_ATTACH) {
		ev->event_data.ptrace.tracer_pid  = current->pid;
		ev->event_data.ptrace.tracer_tgid = current->tgid;
	} else if (ptrace_id == PTRACE_DETACH) {
		ev->event_data.ptrace.tracer_pid  = 0;
		ev->event_data.ptrace.tracer_tgid = 0;
	} else
		return;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
Example #5
0
static int icmp_hnd_timestamp_request(const struct icmphdr *icmph,
		const struct icmpbody_timestamp *tstamp_req,
		struct sk_buff *skb) {
	uint32_t msec_since_12am;
	struct icmpbody_timestamp *tstamp_rep;

	if (icmph->code != 0) {
		skb_free(skb);
		return 0; /* error: bad code */
	}

	if (sizeof *icmph + sizeof *tstamp_req
			!= ip_data_length(ip_hdr(skb))) {
		skb_free(skb);
		return 0; /* error: invalid length */
	}

	msec_since_12am = (ktime_get_ns() / NSEC_PER_MSEC) % (
				SEC_PER_DAY * MSEC_PER_SEC);

	tstamp_rep = &icmp_hdr(skb)->body[0].timestamp;
	tstamp_rep->orig = tstamp_req->trans;
	tstamp_rep->recv = tstamp_rep->trans = htonl(msec_since_12am);

	return icmp_send(ICMP_TIMESTAMP_REPLY, 0, tstamp_rep,
			sizeof *tstamp_rep, skb);
}
Example #6
0
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
Example #7
0
static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
{
	struct imx_media_video_dev *vdev = csi->vdev;
	struct imx_media_buffer *done, *next;
	struct vb2_buffer *vb;
	dma_addr_t phys;

	done = csi->active_vb2_buf[csi->buf_num];
	if (done) {
		done->vbuf.field = vdev->fmt.fmt.pix.field;
		done->vbuf.sequence = csi->frame_sequence;
		vb = &done->vbuf.vb2_buf;
		vb->timestamp = ktime_get_ns();
		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
	}
	csi->frame_sequence++;

	/* get next queued buffer */
	next = imx_media_capture_device_next_buf(vdev);
	if (next) {
		phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
		csi->active_vb2_buf[csi->buf_num] = next;
	} else {
		phys = csi->underrun_buf.phys;
		csi->active_vb2_buf[csi->buf_num] = NULL;
	}

	imx7_csi_update_buf(csi, phys, csi->buf_num);
}
Example #8
0
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
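TBF accounts for tokens in nanoseconds: the elapsed time since the last update is added to the bucket (capped at the configured burst buffer), the time needed to transmit the packet at the configured rate is subtracted, and the packet may leave only if the balance stays non-negative. Below is a compact userspace sketch of that accounting; the structure, the helper names (mono_ns(), len_to_ns(), tb_consume()) and the 10 Mbit/s numbers are made up for illustration and are not the qdisc's real interface.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct tb {
	int64_t tokens;		/* accumulated credit, in nanoseconds */
	int64_t buffer;		/* burst size, in ns at the configured rate */
	uint64_t rate_Bps;	/* bytes per second */
	uint64_t t_c;		/* last update, monotonic ns */
};

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* ns needed to send len bytes at rate_Bps (the role psched_l2t_ns() plays). */
static int64_t len_to_ns(const struct tb *q, uint32_t len)
{
	return (int64_t)((uint64_t)len * 1000000000ULL / q->rate_Bps);
}

/* Returns 1 if the packet may be sent now, 0 if it must wait. */
static int tb_consume(struct tb *q, uint32_t len)
{
	uint64_t now = mono_ns();
	int64_t toks = (int64_t)(now - q->t_c);

	if (toks > q->buffer)
		toks = q->buffer;
	toks += q->tokens;
	if (toks > q->buffer)
		toks = q->buffer;
	toks -= len_to_ns(q, len);

	if (toks >= 0) {
		q->t_c = now;
		q->tokens = toks;
		return 1;
	}
	return 0;	/* caller would reschedule after -toks ns */
}

int main(void)
{
	struct tb q = { .buffer = 10000000,	/* 10 ms worth of burst */
			.rate_Bps = 1250000 };	/* 10 Mbit/s */

	q.t_c = mono_ns();
	q.tokens = q.buffer;

	printf("1500-byte packet %s\n",
	       tb_consume(&q, 1500) ? "sent" : "throttled");
	return 0;
}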
Example #9
0
/* TCN marking scheme */
static inline void tcn_marking(struct sk_buff *skb)
{
        codel_time_t delay;
        delay = ns_to_codel_time(ktime_get_ns() - skb->tstamp.tv64);

        if (codel_time_after(delay, (codel_time_t)wfq_tcn_thresh))
                INET_ECN_set_ce(skb);
}
Example #10
0
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct tc_fq_qd_stats st = {
		.gc_flows		= q->stat_gc_flows,
		.highprio_packets	= q->stat_internal_packets,
		.tcp_retrans		= q->stat_tcp_retrans,
		.throttled		= q->stat_throttled,
		.flows_plimit		= q->stat_flows_plimit,
		.pkts_too_long		= q->stat_pkts_too_long,
		.allocation_errors	= q->stat_allocation_errors,
		.flows			= q->flows,
		.inactive_flows		= q->inactive_flows,
		.throttled_flows	= q->throttled_flows,
		.time_next_delayed_flow	= q->time_next_delayed_flow - now,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}
Example #11
0
/* CoDel ECN marking. Borrow from codel_dequeue in Linux kernel */
static void codel_marking(struct sk_buff *skb, struct wfq_class *cl)
{
	s64 now_ns = ktime_get_ns();
	codel_time_t now = ns_to_codel_time(now_ns);
	bool mark = codel_should_mark(skb, cl, now_ns);

	if (cl->marking)
	{
		if (!mark)
		{
			/* sojourn time below target - leave marking state */
			cl->marking = false;
		}
		else if (codel_time_after_eq(now, cl->mark_next))
		{
			/* It's time for the next mark */
			cl->count++;
			codel_Newton_step(cl);
			cl->mark_next = codel_control_law(cl->mark_next,
					  	          wfq_codel_interval,
					                  cl->rec_inv_sqrt);
			INET_ECN_set_ce(skb);
		}
	}
	else if (mark)
	{
		u32 delta;

		INET_ECN_set_ce(skb);
		cl->marking = true;
		/*
		 * If min went above target close to when we last went below
		 * it, assume that the drop rate that controlled the queue on
		 * the last cycle is a good starting point to control it now.
		 */
		delta = cl->count - cl->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - cl->mark_next,
				      (codel_time_t)wfq_codel_interval * 16))
		{
			cl->count = delta;
			/*
			 * We don't care if the rec_inv_sqrt approximation
			 * is not very precise: the next Newton steps will
			 * correct it quadratically.
			 */
			codel_Newton_step(cl);
		}
		else
		{
			cl->count = 1;
			cl->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		cl->lastcount = cl->count;
		cl->mark_next = codel_control_law(now,
						  wfq_codel_interval,
						  cl->rec_inv_sqrt);
	}
}
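The fixed-point helpers used above (codel_Newton_step(), codel_control_law(), rec_inv_sqrt) implement the published CoDel control law: after the n-th consecutive mark, the next mark is scheduled interval / sqrt(n) later, so the marking rate ramps up gradually while the queue stays above target. The following floating-point sketch only illustrates the math that the kernel approximates with an integer Newton iteration for 1/sqrt(n); it is not the kernel's fixed-point code.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/*
 * CoDel control law: after the n-th consecutive mark/drop, schedule the next
 * one interval / sqrt(n) after the previous, so marking intensifies gently
 * while the sojourn time stays above target.
 */
static uint64_t control_law_ns(uint64_t prev_ns, uint64_t interval_ns,
			       uint32_t count)
{
	return prev_ns + (uint64_t)(interval_ns / sqrt((double)count));
}

int main(void)
{
	const uint64_t interval_ns = 100ULL * 1000000;	/* 100 ms, the CoDel default */
	uint64_t next = 0;

	for (uint32_t count = 1; count <= 5; count++) {
		next = control_law_ns(next, interval_ns, count);
		printf("mark #%u scheduled at %llu ns\n",
		       count, (unsigned long long)next);
	}
	return 0;
}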
Example #12
0
/*
 * Finish delay accounting for a statistic using its timestamp (@start),
 * accumulator (@total) and @count.
 */
static void delayacct_end(u64 *start, u64 *total, u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		spin_lock_irqsave(&current->delays->lock, flags);
		*total += ns;
		(*count)++;
		spin_unlock_irqrestore(&current->delays->lock, flags);
	}
}
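The helper samples the clock at the start of a delayed region, samples it again at the end, and folds the positive difference into a running total and count under a lock. A userspace sketch of the same start/total/count idiom follows, with a pthread mutex standing in for the per-task spinlock; the names delay_stats and delay_end() are invented for the sketch.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct delay_stats {
	pthread_mutex_t lock;
	uint64_t total_ns;
	uint32_t count;
};

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void delay_end(struct delay_stats *st, uint64_t start_ns)
{
	int64_t ns = (int64_t)(mono_ns() - start_ns);

	if (ns > 0) {
		pthread_mutex_lock(&st->lock);
		st->total_ns += ns;
		st->count++;
		pthread_mutex_unlock(&st->lock);
	}
}

int main(void)
{
	struct delay_stats st = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	uint64_t start = mono_ns();

	nanosleep(&(struct timespec){ .tv_nsec = 2000000 }, NULL);	/* 2 ms "delay" */
	delay_end(&st, start);

	printf("%u delays, %llu ns total\n",
	       st.count, (unsigned long long)st.total_ns);
	return 0;
}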
Example #13
0
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}
Example #14
0
static int __init hangcheck_init(void)
{
	printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
	       VERSION_STR, hangcheck_tick, hangcheck_margin);
	hangcheck_tsc_margin =
		(unsigned long long)hangcheck_margin + hangcheck_tick;
	hangcheck_tsc_margin *= TIMER_FREQ;

	hangcheck_tsc = ktime_get_ns();
	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));

	return 0;
}
Example #15
0
static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
{
	int min = 0, max = 0, fail_count = 0;
	uint64_t sum = 0;
	uint64_t avg;
	int i;
	/* Allow udelay to be up to 0.5% fast */
	int allowed_error_ns = usecs * 5;

	for (i = 0; i < iters; ++i) {
		s64 kt1, kt2;
		int time_passed;

		kt1 = ktime_get_ns();
		udelay(usecs);
		kt2 = ktime_get_ns();
		time_passed = kt2 - kt1;

		if (i == 0 || time_passed < min)
			min = time_passed;
		if (i == 0 || time_passed > max)
			max = time_passed;
		if ((time_passed + allowed_error_ns) / 1000 < usecs)
			++fail_count;
		WARN_ON(time_passed < 0);
		sum += time_passed;
	}

	avg = sum;
	do_div(avg, iters);
	seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d",
			usecs, iters, usecs * 1000,
			(usecs * 1000) - allowed_error_ns, min, avg, max);
	if (fail_count)
		seq_printf(s, " FAIL=%d", fail_count);
	seq_puts(s, "\n");

	return 0;
}
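Each iteration brackets udelay() with two ktime_get_ns() samples and folds the measured duration into min/avg/max statistics. The userspace analogue below keeps the same measurement skeleton, substituting nanosleep() for udelay(); since nanosleep() overshoots far more than udelay(), no pass/fail tolerance is applied and the numbers are purely illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	const int usecs = 500;
	const int iters = 100;
	const struct timespec req = { 0, usecs * 1000 };
	int64_t min = 0, max = 0;
	uint64_t sum = 0;

	for (int i = 0; i < iters; i++) {
		uint64_t t1 = mono_ns();
		nanosleep(&req, NULL);		/* stand-in for udelay(usecs) */
		int64_t passed = (int64_t)(mono_ns() - t1);

		if (i == 0 || passed < min)
			min = passed;
		if (i == 0 || passed > max)
			max = passed;
		sum += passed;
	}

	printf("%d usecs x %d: min=%" PRId64 " avg=%" PRIu64 " max=%" PRId64 " ns\n",
	       usecs, iters, min, sum / iters, max);
	return 0;
}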
Example #16
0
/* Initialize Qdisc */
static int wfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	int i;
	struct wfq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	q->tokens = 0;
	q->time_ns = ktime_get_ns();
	q->sum_len_bytes = 0;
	qdisc_watchdog_init(&q->watchdog, sch);

	/* Initialize per-priority variables */
	for (i = 0; i < wfq_max_prio; i++)
	{
		q->prio_len_bytes[i] = 0;
		q->virtual_time[i] = 0;
	}

	/* Initialize per-queue variables */
	for (i = 0; i < wfq_max_queues; i++)
	{
		/* bfifo is in bytes */
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops,
					 wfq_max_buffer_bytes);
		if (likely(child))
			(q->queues[i]).qdisc = child;
		else
			goto err;

		(q->queues[i]).id = i;
		(q->queues[i]).head_fin_time = 0;
		(q->queues[i]).len_bytes = 0;
		(q->queues[i]).count = 0;
		(q->queues[i]).lastcount = 0;
		(q->queues[i]).marking = false;
		(q->queues[i]).rec_inv_sqrt = 0;
		(q->queues[i]).first_above_time = 0;
		(q->queues[i]).mark_next = false;
		(q->queues[i]).ldelay = 0;
	}

	return wfq_change(sch, opt);
err:
	wfq_destroy(sch);
	return -ENOMEM;
}
Example #17
0
static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}
Example #18
0
static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
{
	struct dt3155_priv *ipd = dev_id;
	struct vb2_buffer *ivb;
	dma_addr_t dma_addr;
	u32 tmp;

	tmp = ioread32(ipd->regs + INT_CSR) & (FLD_START | FLD_END_ODD);
	if (!tmp)
		return IRQ_NONE;  /* not our irq */
	if ((tmp & FLD_START) && !(tmp & FLD_END_ODD)) {
		iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START,
							ipd->regs + INT_CSR);
		return IRQ_HANDLED; /* start of field irq */
	}
	tmp = ioread32(ipd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
	if (tmp) {
		iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
						FLD_DN_ODD | FLD_DN_EVEN |
						CAP_CONT_EVEN | CAP_CONT_ODD,
							ipd->regs + CSR1);
		mmiowb();
	}

	spin_lock(&ipd->lock);
	if (ipd->curr_buf && !list_empty(&ipd->dmaq)) {
		ipd->curr_buf->vb2_buf.timestamp = ktime_get_ns();
		ipd->curr_buf->sequence = ipd->sequence++;
		ipd->curr_buf->field = V4L2_FIELD_NONE;
		vb2_buffer_done(&ipd->curr_buf->vb2_buf, VB2_BUF_STATE_DONE);

		ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
		list_del(&ivb->done_entry);
		ipd->curr_buf = to_vb2_v4l2_buffer(ivb);
		dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0);
		iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
		iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
		iowrite32(ipd->width, ipd->regs + EVEN_DMA_STRIDE);
		iowrite32(ipd->width, ipd->regs + ODD_DMA_STRIDE);
		mmiowb();
	}

	/* enable interrupts, clear all irq flags */
	iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
			FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
	spin_unlock(&ipd->lock);
	return IRQ_HANDLED;
}
Example #19
0
/*
 * Obtain a high-resolution time stamp value. The time stamp is used to measure
 * the execution time of a given code path and its variations. Hence, the time
 * stamp must have a sufficiently high resolution.
 *
 * Note, if the function returns zero because a given architecture does not
 * implement a high-resolution time stamp, the RNG code's runtime test
 * will detect it and will not produce output.
 */
void jent_get_nstime(__u64 *out)
{
    __u64 tmp = 0;

    tmp = random_get_entropy();

    /*
     * If random_get_entropy does not return a value, i.e. it is not
     * implemented for a given architecture, use a clock source,
     * hoping that there are timers we can work with.
     */
    if (tmp == 0)
        tmp = ktime_get_ns();

    *out = tmp;
}
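Whether a time source is of "sufficiently high resolution" for jitter collection can be probed empirically by reading it back-to-back and recording the smallest nonzero increment. The userspace sketch below does such a probe; it only inspects the granularity of CLOCK_MONOTONIC and is not the jitterentropy RNG's actual runtime test.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	uint64_t min_delta = UINT64_MAX;
	uint64_t prev = mono_ns();

	/* Sample back-to-back and keep the smallest nonzero step. */
	for (int i = 0; i < 100000; i++) {
		uint64_t cur = mono_ns();

		if (cur > prev && cur - prev < min_delta)
			min_delta = cur - prev;
		prev = cur;
	}

	printf("smallest observed increment: %" PRIu64 " ns\n", min_delta);
	return 0;
}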
Example #20
0
void cx23885_video_wakeup(struct cx23885_dev *dev,
	struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			struct cx23885_buffer, queue);

	buf->vb.sequence = q->count++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
			buf->vb.vb2_buf.index, count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
Example #21
0
static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
					 enum vb2_buffer_state return_status)
{
	struct imx_media_buffer *buf;
	int i;

	/* return any remaining active frames with return_status */
	for (i = 0; i < 2; i++) {
		buf = csi->active_vb2_buf[i];
		if (buf) {
			struct vb2_buffer *vb = &buf->vbuf.vb2_buf;

			vb->timestamp = ktime_get_ns();
			vb2_buffer_done(vb, return_status);
		}
	}
}
Example #22
0
/*
 * FIMC-IS ISP input and output DMA interface interrupt handler.
 * Locking: called with is->slock spinlock held.
 */
void fimc_isp_video_irq_handler(struct fimc_is *is)
{
	struct fimc_is_video *video = &is->isp.video_capture;
	struct vb2_v4l2_buffer *vbuf;
	int buf_index;

	/* TODO: Ensure the DMA is really stopped in stop_streaming callback */
	if (!test_bit(ST_ISP_VID_CAP_STREAMING, &is->isp.state))
		return;

	buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
	vbuf = &video->buffers[buf_index]->vb;

	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	video->buf_mask &= ~BIT(buf_index);
	fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
}
Example #23
0
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Update these values even if sched_clock_stable(), because it can
	 * become unstable at any point in time at which point we need some
	 * values to fall back on.
	 *
	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
	 */
	scd = this_scd();
	scd->tick_raw  = sched_clock();
	scd->tick_gtod = ktime_get_ns();

	if (!sched_clock_stable() && likely(sched_clock_running))
		sched_clock_local(scd);
}
Example #24
0
static void solo_fillbuf(struct solo_dev *solo_dev,
			 struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	dma_addr_t addr;
	unsigned int fdma_addr;
	int error = -1;
	int i;

	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	if (!addr)
		goto finish_buf;

	if (erase_off(solo_dev)) {
		void *p = vb2_plane_vaddr(vb, 0);
		int image_size = solo_image_size(solo_dev);

		for (i = 0; i < image_size; i += 2) {
			((u8 *)p)[i] = 0x80;
			((u8 *)p)[i + 1] = 0x00;
		}
		error = 0;
	} else {
		fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
				(SOLO_HW_BPL * solo_vlines(solo_dev)));

		error = solo_p2m_dma_t(solo_dev, 0, addr, fdma_addr,
				       solo_bytesperline(solo_dev),
				       solo_vlines(solo_dev), SOLO_HW_BPL);
	}

finish_buf:
	if (!error) {
		vb2_set_plane_payload(vb, 0,
			solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
		vbuf->sequence = solo_dev->sequence++;
		vb->timestamp = ktime_get_ns();
	}

	vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
Example #25
0
int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
{
	int handled = 0;
	u32 mask;
	const struct sram_channel *channel = dev->channels[chan_num].sram_channels;

	mask = cx_read(channel->int_msk);
	if (0 == (status & mask))
		return handled;

	cx_write(channel->int_stat, status);

	/* risc op code error */
	if (status & (1 << 16)) {
		pr_warn("%s, %s: video risc op code error\n",
			dev->name, channel->name);
		cx_clear(channel->dma_ctl, 0x11);
		cx25821_sram_channel_dump(dev, channel);
	}

	/* risc1 y */
	if (status & FLD_VID_DST_RISC1) {
		struct cx25821_dmaqueue *dmaq =
			&dev->channels[channel->i].dma_vidq;
		struct cx25821_buffer *buf;

		spin_lock(&dev->slock);
		if (!list_empty(&dmaq->active)) {
			buf = list_entry(dmaq->active.next,
					 struct cx25821_buffer, queue);

			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			buf->vb.sequence = dmaq->count++;
			list_del(&buf->queue);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		spin_unlock(&dev->slock);
		handled++;
	}
Example #26
0
static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
{
	u64 now, tokens;
	s64 delta;

	spin_lock_bh(&limit->lock);
	now = ktime_get_ns();
	tokens = limit->tokens + now - limit->last;
	if (tokens > limit->tokens_max)
		tokens = limit->tokens_max;

	limit->last = now;
	delta = tokens - cost;
	if (delta >= 0) {
		limit->tokens = delta;
		spin_unlock_bh(&limit->lock);
		return limit->invert;
	}
	limit->tokens = tokens;
	spin_unlock_bh(&limit->lock);
	return !limit->invert;
}
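nft's limit expression is another token bucket, but denominated in nanoseconds of budget: elapsed time refills the bucket (capped at tokens_max), each packet consumes a fixed nanosecond cost derived from the configured rate, and the match result can be inverted. Below is a userspace sketch of that evaluation with an assumed per-packet cost; the limiter structure and limit_eval() are invented for the sketch, and the real per-packet cost is computed in the expression's init path, not here.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct limiter {
	pthread_mutex_t lock;
	uint64_t tokens;	/* remaining budget, in ns */
	uint64_t tokens_max;	/* full bucket, in ns */
	uint64_t last;		/* last refill timestamp, ns */
	bool invert;
};

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Returns true when the packet exceeds the limit (mirrors nft_limit_eval). */
static bool limit_eval(struct limiter *l, uint64_t cost)
{
	bool over;

	pthread_mutex_lock(&l->lock);
	uint64_t now = mono_ns();
	uint64_t tokens = l->tokens + (now - l->last);

	if (tokens > l->tokens_max)
		tokens = l->tokens_max;
	l->last = now;

	if (tokens >= cost) {		/* within the limit */
		l->tokens = tokens - cost;
		over = l->invert;
	} else {			/* over the limit */
		l->tokens = tokens;
		over = !l->invert;
	}
	pthread_mutex_unlock(&l->lock);
	return over;
}

int main(void)
{
	/* Illustrative numbers: 1 s of budget, each packet "costs" 100 ms. */
	struct limiter l = { PTHREAD_MUTEX_INITIALIZER,
			     1000000000ULL, 1000000000ULL, 0, false };

	l.last = mono_ns();
	for (int i = 0; i < 12; i++)
		printf("packet %2d: %s\n", i,
		       limit_eval(&l, 100000000ULL) ? "over limit" : "allowed");
	return 0;
}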
Example #27
0
void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
				    struct vsp1_histogram_buffer *buf,
				    size_t size)
{
	struct vsp1_pipeline *pipe = histo->entity.pipe;
	unsigned long flags;

	/*
	 * The pipeline pointer is guaranteed to be valid as this function is
	 * called from the frame completion interrupt handler, which can only
	 * occur when video streaming is active.
	 */
	buf->buf.sequence = pipe->sequence;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

	spin_lock_irqsave(&histo->irqlock, flags);
	histo->readout = false;
	wake_up(&histo->wait_queue);
	spin_unlock_irqrestore(&histo->irqlock, flags);
}
Example #28
0
static int nft_limit_init(struct nft_limit *limit,
			  const struct nlattr * const tb[])
{
	u64 unit;

	if (tb[NFTA_LIMIT_RATE] == NULL ||
	    tb[NFTA_LIMIT_UNIT] == NULL)
		return -EINVAL;

	limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
	unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
	limit->nsecs = unit * NSEC_PER_SEC;
	if (limit->rate == 0 || limit->nsecs < unit)
		return -EOVERFLOW;
	limit->tokens = limit->tokens_max = limit->nsecs;

	if (tb[NFTA_LIMIT_BURST]) {
		u64 rate;

		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));

		rate = limit->rate + limit->burst;
		if (rate < limit->rate)
			return -EOVERFLOW;

		limit->rate = rate;
	}
	if (tb[NFTA_LIMIT_FLAGS]) {
		u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));

		if (flags & NFT_LIMIT_F_INV)
			limit->invert = true;
	}
	limit->last = ktime_get_ns();
	spin_lock_init(&limit->lock);

	return 0;
}
Example #29
0
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	msg->seq = rcvd_seq;
	ev->timestamp_ns = ktime_get_ns();
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
Example #30
0
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	ev->timestamp_ns = ktime_get_ns();

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}