Example No. 1
static inline void arch_timer_reg_write(int access, int reg, u32 val,
					struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed_no_log(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed_no_log(val, timer->base + CNTP_TVAL);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed_no_log(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed_no_log(val, timer->base + CNTV_TVAL);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
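Callers pass `access` as a compile-time constant, so after inlining only one branch survives and the BUILD_BUG() guard can never be reached. A minimal sketch of such a caller, modeled on the set_next_event() helper in drivers/clocksource/arm_arch_timer.c (it also uses the read accessor from the next example; the exact body varies across kernel versions):

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;

	/* Read-modify-write of the control register, then program TVAL */
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}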
Example No. 2
static inline u32 arch_timer_reg_read(int access, int reg,
				      struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed_no_log(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed_no_log(timer->base + CNTP_TVAL);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed_no_log(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed_no_log(timer->base + CNTV_TVAL);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
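The read accessor pairs with the write accessor above for read-modify-write updates of the control register. A sketch modeled on the timer_shutdown() helper in the same driver (treat the exact body as illustrative):

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	/* Clear only the enable bit, preserving the other CTRL flags */
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}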
Example No. 3
/* Multiple enqueues before dequeues
 * - a somewhat odd test, since bulk operations are the normal solution,
 *   but we want to see the cost when bulk is not available/used, while
 *   also touching more of the array
 */
static __always_inline int time_multi_enq_deq(
	struct time_bench_record *rec, void *data,
	enum queue_behavior_type type)
{
	int on_stack = 123;
	int *obj = &on_stack;
	int *deq_obj = NULL;
	int i, n;
	uint64_t loops_cnt = 0;
	int elems = rec->step;
	struct alf_queue *queue = (struct alf_queue *)data;

	if (queue == NULL) {
		pr_err("Need queue struct ptr as input\n");
		return -1;
	}
	/* loop count is limited to 32-bit due to div_u64_rem() use */
	if (((uint64_t)rec->loops * 2 * elems) >= ((1ULL << 32) - 1)) {
		pr_err("Loop cnt too big, will overflow 32-bit\n");
		return 0;
	}

	time_bench_start(rec);

	/** Loop to measure **/
	for (i = 0; i < rec->loops; i++) {
		for (n = 0; n < elems; n++) {
			if (type & ALF_FLAG_SP) {
				if (alf_sp_enqueue(queue, (void **)&obj, 1) != 1)
					goto fail;
			} else if (type & ALF_FLAG_MP) {
				if (alf_mp_enqueue(queue, (void **)&obj, 1) != 1)
					goto fail;
			} else {
				BUILD_BUG();
			}
			loops_cnt++;
		}
		barrier(); /* compiler barrier */
		for (n = 0; n < elems; n++) {
			if (type & ALF_FLAG_SC) {
				if (alf_sc_dequeue(queue, (void **)&deq_obj, 1) != 1)
					goto fail;
			} else if (type & ALF_FLAG_MC) {
				if (alf_mc_dequeue(queue, (void **)&deq_obj, 1) != 1)
					goto fail;
			} else {
				BUILD_BUG();
			}
			loops_cnt++;
		}
	}

	time_bench_stop(rec, loops_cnt);

	return loops_cnt;
fail:
	return -1;
}
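Note that BUILD_BUG() in a runtime else-branch only compiles because the function is __always_inline and callers pass `type` as a compile-time constant, letting the compiler prove the branch dead and drop it. A minimal sketch of such a caller (the two-argument signature is what a generic time_bench runner would expect; the flag combination shown is an assumption):

static int time_multi_enq_deq_spsc(struct time_bench_record *rec, void *data)
{
	/* Constant flags let the compiler prune the untaken branches */
	return time_multi_enq_deq(rec, data, ALF_FLAG_SP | ALF_FLAG_SC);
}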
Example No. 4
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
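The __builtin_constant_p(size) test pays off because the function is __always_inline: at call sites with a constant size, the switch collapses to a single specialized check, while a variable size falls through to the generic path. Hypothetical call sites illustrating both (the pr_err() reporting merely stands in for KASAN's real report machinery):

static __always_inline void check_word(unsigned long addr)
{
	/* size == 8 is constant: resolves to memory_is_poisoned_2_4_8() */
	if (memory_is_poisoned(addr, 8))
		pr_err("poisoned 8-byte access at 0x%lx\n", addr);
}

static void check_range(unsigned long addr, size_t len)
{
	/* len is not constant: falls through to memory_is_poisoned_n() */
	if (memory_is_poisoned(addr, len))
		pr_err("poisoned %zu-byte access at 0x%lx\n", len, addr);
}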
Example No. 5
static __always_inline int time_bench_CPU_enq_or_deq(
	struct time_bench_record *rec, void *data,
	enum queue_behavior_type type)
{
	int on_stack = 123;
	int *obj = &on_stack;
	int *deq_obj = NULL;
	int i;
	uint64_t loops_cnt = 0;
	struct alf_queue *queue = (struct alf_queue *)data;
	bool enq_CPU = false;

	if (queue == NULL) {
		pr_err("Need queue struct ptr as input\n");
		return -1;
	}
	/* loop count is limited to 32-bit due to div_u64_rem() use */
	if (((uint64_t)rec->loops * 2) >= ((1ULL << 32) - 1)) {
		pr_err("Loop cnt too big, will overflow 32-bit\n");
		return 0;
	}

	/* Split CPU between enq/deq based on even/odd */
	if ((smp_processor_id() % 2) == 0)
		enq_CPU = true;

	/* Hack: use "step" to mark enq/deq, as "step" gets printed */
	rec->step = enq_CPU;

	time_bench_start(rec);
	/** Loop to measure **/
	for (i = 0; i < rec->loops; i++) {

		if (enq_CPU) {
			/* The compiler will hopefully optimize this out */
			if (type & ALF_FLAG_SP) {
				if (alf_sp_enqueue(queue,
						   (void **)&obj, 1) != 1)
					goto finish_early;
			} else if (type & ALF_FLAG_MP) {
				if (alf_mp_enqueue(queue,
						   (void **)&obj, 1) != 1)
					goto finish_early;
			} else {
				BUILD_BUG();
			}
		} else {
			if (type & ALF_FLAG_SC) {
				if (alf_sc_dequeue(queue,
						   (void **)&deq_obj, 1) != 1)
					goto finish_early;
			} else if (type & ALF_FLAG_MC) {
				if (alf_mc_dequeue(queue,
						   (void **)&deq_obj, 1) != 1)
					goto finish_early;
			} else {
				BUILD_BUG();
			}
		}
		barrier(); /* compiler barrier */
		loops_cnt++;
	}
	time_bench_stop(rec, loops_cnt);
	return loops_cnt;

finish_early:
	time_bench_stop(rec, loops_cnt);
	if (enq_CPU) {
		pr_err("%s() WARN: enq fullq(CPU:%d) i:%d\n",
		       __func__, smp_processor_id(), i);
	} else {
		pr_err("%s() WARN: deq emptyq (CPU:%d) i:%d\n",
		       __func__, smp_processor_id(), i);
	}
	return loops_cnt;
}
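The even/odd smp_processor_id() test splits CPUs into producers and consumers, so the function must run concurrently on at least two CPUs. A minimal sketch of a launcher using pinned kthreads (helper names, flag choice, and the fixed loop count are assumptions; the real benchmark framework provides its own concurrent runner):

#include <linux/kthread.h>

static int bench_thread(void *data)
{
	struct time_bench_record rec = { .loops = 1000000 };

	/* MP/MC flags: several CPUs enqueue and dequeue concurrently;
	 * the enq/deq role is decided inside via smp_processor_id() */
	time_bench_CPU_enq_or_deq(&rec, data, ALF_FLAG_MP | ALF_FLAG_MC);
	return 0;
}

static void launch_bench(struct alf_queue *queue)
{
	struct task_struct *t;
	int cpu;

	for_each_online_cpu(cpu) {
		t = kthread_create(bench_thread, queue, "alf_bench/%d", cpu);
		if (IS_ERR(t))
			continue;
		kthread_bind(t, cpu);	/* pin before first wakeup */
		wake_up_process(t);
	}
}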
Example No. 6
/* Bulk variant of the benchmark above */
static __always_inline int time_bench_CPU_BULK_enq_or_deq(
	struct time_bench_record *rec, void *data,
	enum queue_behavior_type type)
{
#define MAX_BULK 64
	uint64_t loops_cnt = 0;
	int *deq_objs[MAX_BULK];
	int *objs[MAX_BULK];
	int bulk = rec->step;
	struct alf_queue *queue = (struct alf_queue *)data;
	bool enq_CPU = false;
	int i;

	if (queue == NULL) {
		pr_err("Need alf_queue as input\n");
		return -1;
	}
	if (bulk > MAX_BULK) {
		pr_warn("%s() bulk(%d) request too big cap at %d\n",
			__func__, bulk, MAX_BULK);
		bulk = MAX_BULK;
		rec->step = MAX_BULK;
	}
	/* loop count is limited to 32-bit due to div_u64_rem() use */
	if (((uint64_t)rec->loops * bulk * 2) >= ((1ULL << 32) - 1)) {
		pr_err("Loop cnt too big, will overflow 32-bit\n");
		return 0;
	}
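	/* Worked example of the guard above: with bulk = 64, each loop
	 * iteration adds 2 * 64 = 128 to loops_cnt, so rec->loops must stay
	 * below (2^32 - 1) / 128, i.e. roughly 33.5 million iterations.
	 */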

	/* Split CPU between enq/deq based on even/odd */
	if ((smp_processor_id() % 2) == 0)
		enq_CPU = true;

	/* fake init pointers to a number */
	for (i = 0; i < MAX_BULK; i++)
		objs[i] = (void *)(unsigned long)(i + 20);

	time_bench_start(rec);

	/** Loop to measure **/
	for (i = 0; loops_cnt < rec->loops; i++) {

		if (enq_CPU) { /* Enqueue side */
			/* The compiler will hopefully optimize this out */
			if (type & ALF_FLAG_SP) {
				if (alf_sp_enqueue(queue,
						   (void **)objs, bulk) != bulk)
					goto finish_early;
			} else if (type & ALF_FLAG_MP) {
				if (alf_mp_enqueue(queue,
						   (void **)objs, bulk) != bulk)
					goto finish_early;
			} else {
				BUILD_BUG();
			}
		} else { /* Dequeue side */
			if (type & ALF_FLAG_SC) {
				if (alf_sc_dequeue(queue, (void **)deq_objs,
						   bulk) != bulk)
					goto finish_early;
			} else if (type & ALF_FLAG_MC) {
				if (alf_mc_dequeue(queue, (void **)deq_objs,
						   bulk) != bulk)
					goto finish_early;
			} else {
				BUILD_BUG();
			}
		}
		barrier(); /* compiler barrier */
		loops_cnt += bulk;
	}
	time_bench_stop(rec, loops_cnt);
	return loops_cnt;

finish_early:
	time_bench_stop(rec, loops_cnt);
	if (enq_CPU) {
		pr_err("%s() WARN: enq fullq(CPU:%d) i:%d bulk:%d\n",
		       __func__, smp_processor_id(), i, bulk);
	} else {
		pr_err("%s() WARN: deq emptyq (CPU:%d) i:%d bulk:%d\n",
		       __func__, smp_processor_id(), i, bulk);
	}
	return loops_cnt;
#undef MAX_BULK
}
Example No. 7
static __always_inline int time_BULK_enq_deq(
	struct time_bench_record *rec, void *data,
	enum queue_behavior_type type)
{
#define MAX_BULK 32
	int *objs[MAX_BULK];
	int *deq_objs[MAX_BULK];
	int i;
	uint64_t loops_cnt = 0;
	int bulk = rec->step;
	struct alf_queue *queue = (struct alf_queue *)data;

	if (queue == NULL) {
		pr_err("Need alf_queue as input\n");
		return -1;
	}
	if (bulk > MAX_BULK) {
		pr_warn("%s() bulk(%d) request too big cap at %d\n",
			__func__, bulk, MAX_BULK);
		bulk = MAX_BULK;
	}
	/* loop count is limited to 32-bit due to div_u64_rem() use */
	if (((uint64_t)rec->loops * bulk * 2) >= ((1ULL << 32) - 1)) {
		pr_err("Loop cnt too big, will overflow 32-bit\n");
		return 0;
	}
	/* fake init pointers to a number */
	for (i = 0; i < MAX_BULK; i++)
		objs[i] = (void *)(unsigned long)(i + 20);

	time_bench_start(rec);

	/** Loop to measure **/
	for (i = 0; i < rec->loops; i++) {
		if (type & ALF_FLAG_SP) {
			if (alf_sp_enqueue(queue, (void **)objs, bulk) != bulk)
				goto fail;
		} else if (type & ALF_FLAG_MP) {
			if (alf_mp_enqueue(queue, (void **)objs, bulk) != bulk)
				goto fail;
		} else {
			BUILD_BUG();
		}
		loops_cnt += bulk;

		barrier(); /* compiler barrier */
		if (type & ALF_FLAG_SC) {
			if (alf_sc_dequeue(queue, (void **)deq_objs, bulk) != bulk)
				goto fail;
		} else if (type & ALF_FLAG_MC) {
			if (alf_mc_dequeue(queue, (void **)deq_objs, bulk) != bulk)
				goto fail;
		} else {
			BUILD_BUG();
		}
		loops_cnt += bulk;
	}

	time_bench_stop(rec, loops_cnt);

	return loops_cnt;
fail:
	return -1;
}
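A minimal sketch of driving this benchmark (the time_bench_loop() runner comes from the same time_bench framework, but treat its exact signature as an assumption): rec->step carries the bulk size, and because loops_cnt advances by "bulk" per enqueue and per dequeue call, the reported cost is per element, so different bulk sizes compare directly.

static int time_BULK_spsc(struct time_bench_record *rec, void *data)
{
	return time_BULK_enq_deq(rec, data, ALF_FLAG_SP | ALF_FLAG_SC);
}

static void sweep_bulk_sizes(struct alf_queue *queue)
{
	int bulk;

	/* Sweep power-of-two bulk sizes up to the MAX_BULK (32) above;
	 * "step" doubles as the bulk size inside the benchmark */
	for (bulk = 2; bulk <= 32; bulk <<= 1)
		time_bench_loop(100000, bulk, "BULK-spsc", queue,
				time_BULK_spsc);
}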