Example 1
void l4x_pte_check_empty(struct mm_struct *mm)
{
	struct unmap_log_t *log;
	int i;

	WARN_ON(!irqs_disabled()); // otherwise we need to go non-preemptible

	log = this_cpu_ptr(&unmap_log);

	if (likely(this_cpu_read(unmap_log.cnt) == 0))
		return;

	for (i = 0; i < log->cnt; ++i) {
		if (mm != log->log[i].mm)
			continue;

		l4x_printf("L4x: exiting with non-flushed entry: %lx:%lx[sz=%d,r=%x,from=%lx,cpu=%d,num=%d]\n",
			   log->log[i].mm->context.task,
		           log->log[i].addr, log->log[i].size,
		           log->log[i].rights,
		           log->log[i].dbg1, raw_smp_processor_id(), i);
	}

	l4x_unmap_log_flush();
}
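
All of the unmap examples share a per-CPU unmap_log buffer whose definition is not part of this listing. A minimal reconstruction, inferred purely from the field accesses in Examples 1-4 (the entry capacity and the exact types are assumptions), might look like this:

#define UNMAP_LOG_ENTRIES 32	/* capacity: assumed, not from the source */

struct unmap_log_t {
	unsigned cnt;			/* number of valid entries */
	struct {
		struct mm_struct *mm;	/* address space the entry belongs to */
		unsigned long addr;	/* virtual address to unmap */
		int size;		/* log2 size, as passed to l4_fpage() */
		unsigned rights;	/* L4 fpage rights to revoke */
		unsigned long dbg1;	/* caller address, for debugging ("from=") */
	} log[UNMAP_LOG_ENTRIES];
};

static DEFINE_PER_CPU(struct unmap_log_t, unmap_log);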
Example 2
void l4x_unmap_log_flush(void)
{
	unsigned i;
	struct unmap_log_t *log;
	unsigned long flags;

	local_irq_save(flags);

	log = this_cpu_ptr(&unmap_log);

	for (i = 0; i < log->cnt; ++i) {
		l4_msgtag_t tag;
		struct mm_struct *mm = log->log[i].mm;

		if (unlikely(l4_is_invalid_cap(mm->context.task)))
			continue;

		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                            l4_fpage(log->log[i].addr,
		                                     log->log[i].size,
		                                     log->log[i].rights),
		                            L4_FP_ALL_SPACES));
		if (unlikely(l4_error(tag))) {
			l4x_printf("l4_task_unmap error %ld: t=%lx\n",
			           l4_error(tag), mm->context.task);
			WARN_ON(1);
	} else if (0) /* disabled debug trace; change to 1 to log each flush */
			l4x_printf("flushing(%d) %lx:%08lx[%d,%x]\n",
			           i, mm->context.task,
			           log->log[i].addr, log->log[i].size,
			           log->log[i].rights);
	}

	log->cnt = 0;
	local_irq_restore(flags);
}
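
The add-side counterpart that fills this log is called in Example 4 as unmap_log_add(mm, vaddr, size, flush_rights, caller), but its body is not shown anywhere in the listing. A plausible sketch, assuming it appends an entry under disabled interrupts and flushes eagerly once the buffer is full:

/* Hypothetical implementation of the add side; uses UNMAP_LOG_ENTRIES from
 * the struct sketch above. The real L4Linux version may differ. */
static void unmap_log_add(struct mm_struct *mm, unsigned long addr,
                          int size, unsigned rights, unsigned long caller)
{
	unsigned long flags;
	struct unmap_log_t *log;

	local_irq_save(flags);
	log = this_cpu_ptr(&unmap_log);

	log->log[log->cnt].mm     = mm;
	log->log[log->cnt].addr   = addr & PAGE_MASK;	/* assumed page-aligned */
	log->log[log->cnt].size   = size;
	log->log[log->cnt].rights = rights;
	log->log[log->cnt].dbg1   = caller;

	if (++log->cnt == UNMAP_LOG_ENTRIES)
		l4x_unmap_log_flush();	/* resets cnt to 0 */

	local_irq_restore(flags);
}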
Example 3
void l4x_pte_check_empty(struct mm_struct *mm)
{
	if (this_cpu_read(unmap_log.cnt)) {
		/* __get_cpu_var() is the older per-CPU accessor, equivalent to
		 * the this_cpu_ptr(&unmap_log) used in Example 1 */
		struct unmap_log_t *log = &__get_cpu_var(unmap_log);
		int i;

		for (i = 0; i < log->cnt; ++i) {
			if (mm != log->log[i].mm)
				continue;

			l4x_printf("L4x: exiting with non-flushed entry: %lx:%lx[%d,%x]\n",
				   log->log[i].mm->context.task,
			           log->log[i].addr, log->log[i].size,
			           log->log[i].rights);
		}
	}
}
Example 4
static void l4x_flush_page(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long vaddr,
                           int size,
                           unsigned long flush_rights, unsigned long caller)
{
	l4_msgtag_t tag;

	if (IS_ENABLED(CONFIG_ARM))
		return;

	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	if ((address & PAGE_MASK) == 0)
		address = PAGE0_PAGE_ADDRESS;

	if (likely(mm)) {
		unmap_log_add(mm, vaddr, size, flush_rights, caller);
		return;
	}

	/* do the real flush; note that mm is always NULL at this point,
	 * because the mm != NULL case was queued via unmap_log_add() and
	 * returned above, so only the else branch is ever taken */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		/* Direct flush in the child, use virtual address in the
		 * child address space */
		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                           l4_fpage(vaddr & PAGE_MASK, size,
		                                    flush_rights),
		                           L4_FP_ALL_SPACES));
	} else {
		/* Flush all pages in all children using the 'physical'
		 * address known in the Linux server */
		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(L4RE_THIS_TASK_CAP,
			                    l4_fpage(address & PAGE_MASK, size,
		                                     flush_rights),
			                    L4_FP_OTHER_SPACES));
	}

	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}
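
Note that the size argument handed to l4_fpage() is a flexpage order (log2 of the region size), not a byte count, so flushing a single page passes PAGE_SHIFT. For illustration, revoking only the write right on one page in every task that received a mapping could look roughly like this (addr and task_cap are placeholder names, not from the listing):

	/* revoke write access to a single page in all mapped tasks */
	l4_msgtag_t tag = L4XV_FN(l4_msgtag_t,
	                          l4_task_unmap(task_cap,
	                                        l4_fpage(addr & PAGE_MASK,
	                                                 PAGE_SHIFT,
	                                                 L4_FPAGE_W),
	                                        L4_FP_ALL_SPACES));
	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));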
Example 5
static int __init l4x_module_init(void)
{
	printk("Hi from the sample module\n");
	l4x_printf("sample module: Also a warm welcome to the console\n");
	return 0;
}
Example 6
static void __exit l4x_module_exit(void)
{
	l4x_printf("Bye from sample module\n");
}
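
Examples 5 and 6 show only the handler bodies; to build a loadable module they would also need the usual registration boilerplate, roughly as follows (the license and description strings are assumptions):

#include <linux/module.h>
#include <linux/init.h>

module_init(l4x_module_init);
module_exit(l4x_module_exit);

MODULE_LICENSE("GPL");	/* assumed; use the module's actual license */
MODULE_DESCRIPTION("L4Linux sample module");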
Example 7
static void l4x_flush_page(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long vaddr,
                           int size,
                           unsigned long flush_rights)
{
	l4_msgtag_t tag;

	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	/* Sanity check: addresses above 0x80000000 are treated as ioremapped
	 * I/O memory and translated back via the ioremap table */
	if (address > 0x80000000UL) {
		unsigned long remap;
		remap = find_ioremap_entry(address);

		/* VU: it may happen that memory is not remapped but mapped in
		 * user space, if a task mmaps /dev/mem but never accesses it.
		 * Therefore, we fail silently...
		 */
		if (!remap)
			return;

		address = remap;

	} else if ((address & PAGE_MASK) == 0)
		address = PAGE0_PAGE_ADDRESS;

#if 0
	/* only for debugging */
	else {
		if ((address >= (unsigned long)high_memory)
		    && (address < 0x80000000UL)) {
			printk("flushing non physical page (0x%lx)\n",
				    address);
			enter_kdebug("flush_page: non physical page");
		}
	}
#endif

	/* do the real flush */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		L4XV_V(f);
		if (!mm->context.task)
			l4x_printf("%s: Ups, task == 0\n", __func__);
		/* Direct flush in the child, use virtual address in the
		 * child address space */
		L4XV_L(f);
		tag = l4_task_unmap(mm->context.task,
		                    l4_fpage(vaddr & PAGE_MASK, size, flush_rights),
		                    L4_FP_ALL_SPACES);
		L4XV_U(f);
	} else {
		L4XV_V(f);
		/* Flush all pages in all children using the 'physical'
		 * address known in the Linux server */
		L4XV_L(f);
		tag = l4_task_unmap(L4RE_THIS_TASK_CAP,
			            l4_fpage(address & PAGE_MASK, size, flush_rights),
			            L4_FP_OTHER_SPACES);
		L4XV_U(f);
	}
	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}
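
This older variant takes the vCPU lock explicitly with the L4XV_V/L4XV_L/L4XV_U triple around each l4_task_unmap() call, where Examples 2 and 4 use the L4XV_FN() wrapper instead. The wrapper is presumably just a statement-expression packaging of the same sequence, along these lines (a sketch only; the real macro may differ):

/* Hypothetical definition: evaluate 'expr' of type 'rettype' with the
 * vCPU lock held, mirroring the explicit L4XV_V/L4XV_L/L4XV_U pattern */
#define L4XV_FN(rettype, expr)		\
	({				\
		rettype __ret;		\
		L4XV_V(__f);		\
		L4XV_L(__f);		\
		__ret = (expr);		\
		L4XV_U(__f);		\
		__ret;			\
	})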