/*
 * l4x_exit_thread() - tear down the L4 side of the exiting Linux thread.
 *
 * In the non-vCPU model every Linux user thread is backed by one L4 thread
 * per CPU plus, for "hybrid" tasks, an IPC gate.  This releases all of them
 * for 'current'.  No-op (except CONFIG_X86_DS) when CONFIG_L4_VCPU is set.
 */
void l4x_exit_thread(void)
{
#ifndef CONFIG_L4_VCPU
	int i;

	/* Hybrid tasks own an extra IPC gate; destroy it first. */
	if (unlikely(current->thread.is_hybrid)) {
		l4_cap_idx_t hybgate;
		l4_msgtag_t tag;
		l4_umword_t o = 0;

		/* Gate cap slot is derived from the pid — assumes pids fit
		 * the hybrid cap window; TODO confirm range is enforced
		 * elsewhere. */
		hybgate = L4LX_KERN_CAP_HYBRID_BASE
		          + (current->pid << L4_CAP_SHIFT);

		/* The gate label 'o' holds a kmalloc'ed pointer stashed at
		 * gate creation (presumably — verify against the creation
		 * site); free it, or deliberately leak on query failure. */
		tag = l4_ipc_gate_get_infos(hybgate, &o);
		if (l4_error(tag))
			printk("hybrid: Could not get gate info, leaking mem.\n");
		else
			kfree((void *)o);

		/* Unmap the gate capability from all spaces, destroying it. */
		tag = l4_task_unmap(L4_BASE_TASK_CAP,
		                    l4_obj_fpage(hybgate, 0, L4_FPAGE_RWX),
		                    L4_FP_ALL_SPACES);
		if (l4_error(tag))
			printk("hybrid: Delete of gate failed.\n");
	}

	/* Delete the per-CPU L4 user threads backing this Linux thread. */
	for (i = 0; i < NR_CPUS; i++) {
		l4_cap_idx_t thread_id = current->thread.user_thread_ids[i];

		/* check if we were a non-user thread (i.e., have no
		 * user-space partner) */
		if (unlikely(l4_is_invalid_cap(thread_id) || !thread_id))
			continue;

#ifdef DEBUG
		LOG_printf("exit_thread: trying to delete %s(%d, "
		           PRINTF_L4TASK_FORM ")\n",
		           current->comm, current->pid,
		           PRINTF_L4TASK_ARG(thread_id));
#endif

		/* If task_delete fails we don't free the task number so that
		 * it won't be used again. */
		if (likely(!l4lx_task_delete_thread(thread_id))) {
			l4x_hybrid_remove(current);
			current->thread.user_thread_ids[i] = L4_INVALID_CAP;
			l4lx_task_number_free(thread_id);
			current->thread.started = 0;
		} else
			printk("%s: failed to delete task " PRINTF_L4TASK_FORM
			       "\n", __func__, PRINTF_L4TASK_ARG(thread_id));
	}
#endif

#ifdef CONFIG_X86_DS
	/* Branch/Debug-Store cleanup (x86 only). */
	ds_exit_thread(current);
#endif
}
static void l4x_flush_page(struct mm_struct *mm, unsigned long address, unsigned long vaddr, int size, unsigned long flush_rights, unsigned long caller) { l4_msgtag_t tag; if (IS_ENABLED(CONFIG_ARM)) return; if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP) return; if ((address & PAGE_MASK) == 0) address = PAGE0_PAGE_ADDRESS; if (likely(mm)) { unmap_log_add(mm, vaddr, size, flush_rights, caller); return; } /* do the real flush */ if (mm && !l4_is_invalid_cap(mm->context.task)) { /* Direct flush in the child, use virtual address in the * child address space */ tag = L4XV_FN(l4_msgtag_t, l4_task_unmap(mm->context.task, l4_fpage(vaddr & PAGE_MASK, size, flush_rights), L4_FP_ALL_SPACES)); } else { /* Flush all pages in all childs using the 'physical' * address known in the Linux server */ tag = L4XV_FN(l4_msgtag_t, l4_task_unmap(L4RE_THIS_TASK_CAP, l4_fpage(address & PAGE_MASK, size, flush_rights), L4_FP_OTHER_SPACES)); } if (l4_error(tag)) l4x_printf("l4_task_unmap error %ld\n", l4_error(tag)); }
/*
 * l4x_unmap_self() - revoke one page from the Linux server's own task and
 * from every task it was mapped on to (L4_FP_ALL_SPACES).  Used on the DMA
 * remap path; failure is only reported, not propagated.
 */
static inline void l4x_unmap_self(unsigned long a)
{
	unsigned long page;
	l4_msgtag_t tag;

	if (0)
		printk("dma-self-unmap: %08lx\n", a);

	page = a & PAGE_MASK;

	tag = L4XV_FN(l4_msgtag_t,
	              l4_task_unmap(L4_BASE_TASK_CAP,
	                            l4_fpage(page, PAGE_SHIFT, L4_FPAGE_RWX),
	                            L4_FP_ALL_SPACES));

	if (l4_error(tag))
		printk("dma-remap: internal unmapping of %08lx failed\n",
		       page);
}
/*
 * l4x_unmap_log_flush() - drain this CPU's deferred unmap log.
 *
 * Replays every entry recorded by unmap_log_add() as a real
 * l4_task_unmap() on the owning task, then resets the log.  Runs with
 * local interrupts disabled so the per-CPU log cannot be appended to (or
 * migrated away from) while it is being drained.
 */
void l4x_unmap_log_flush(void)
{
	unsigned i;
	struct unmap_log_t *log;
	unsigned long flags;

	/* IRQs off: protects the per-CPU log against concurrent
	 * unmap_log_add() from interrupt context on this CPU. */
	local_irq_save(flags);

	log = this_cpu_ptr(&unmap_log);

	for (i = 0; i < log->cnt; ++i) {
		l4_msgtag_t tag;
		struct mm_struct *mm = log->log[i].mm;

		/* Task already destroyed — nothing left to unmap from. */
		if (unlikely(l4_is_invalid_cap(mm->context.task)))
			continue;

		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                            l4_fpage(log->log[i].addr,
		                                     log->log[i].size,
		                                     log->log[i].rights),
		                            L4_FP_ALL_SPACES));
		if (unlikely(l4_error(tag))) {
			l4x_printf("l4_task_unmap error %ld: t=%lx\n",
			           l4_error(tag), mm->context.task);
			WARN_ON(1);
		} else if (0)
			/* Disabled flush-tracing aid. */
			l4x_printf("flushing(%d) %lx:%08lx[%d,%x]\n",
			           i, mm->context.task, log->log[i].addr,
			           log->log[i].size, log->log[i].rights);
	}

	/* Log fully replayed; mark it empty. */
	log->cnt = 0;

	local_irq_restore(flags);
}
/*
 * l4x_flush_page() - immediately revoke a page mapping from child tasks.
 *
 * @mm:           owning address space, or NULL to flush by server address
 * @address:      address as known in the Linux server ('physical' view)
 * @vaddr:        virtual address in the child address space
 * @size:         log2 size of the flexpage to revoke
 * @flush_rights: rights to revoke (L4_FPAGE_*)
 *
 * Unlike the logging variant, this performs the l4_task_unmap() directly,
 * bracketed by the L4XV_L/L4XV_U lock pair.
 */
static void l4x_flush_page(struct mm_struct *mm, unsigned long address,
                           unsigned long vaddr, int size,
                           unsigned long flush_rights)
{
	l4_msgtag_t tag;

	/* Skip-mode address spaces are torn down as a whole. */
	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	/* some checks: */
	/* Addresses above 2 GiB are treated as ioremapped device memory —
	 * translate back through the ioremap table. */
	if (address > 0x80000000UL) {
		unsigned long remap;
		remap = find_ioremap_entry(address);

		/* VU: it may happen, that memory is not remapped but mapped
		 * in user space, if a task mmaps /dev/mem but never accesses
		 * it. Therefore, we fail silently... */
		if (!remap)
			return;

		address = remap;
	} else if ((address & PAGE_MASK) == 0)
		/* Page 0 is specially relocated inside the Linux server. */
		address = PAGE0_PAGE_ADDRESS;

#if 0
	/* only for debugging */
	else {
		if ((address >= (unsigned long)high_memory)
		    && (address < 0x80000000UL)) {
			printk("flushing non physical page (0x%lx)\n",
			       address);
			enter_kdebug("flush_page: non physical page");
		}
	}
#endif

	/* do the real flush */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		L4XV_V(f);
		/* Sanity: a zero task cap here indicates inconsistent
		 * context state. */
		if (!mm->context.task)
			l4x_printf("%s: Ups, task == 0\n", __func__);
		/* Direct flush in the child, use virtual address in the
		 * child address space */
		L4XV_L(f);
		tag = l4_task_unmap(mm->context.task,
		                    l4_fpage(vaddr & PAGE_MASK, size,
		                             flush_rights),
		                    L4_FP_ALL_SPACES);
		L4XV_U(f);
	} else {
		L4XV_V(f);
		/* Flush all pages in all childs using the 'physical'
		 * address known in the Linux server */
		L4XV_L(f);
		tag = l4_task_unmap(L4RE_THIS_TASK_CAP,
		                    l4_fpage(address & PAGE_MASK, size,
		                             flush_rights),
		                    L4_FP_OTHER_SPACES);
		L4XV_U(f);
	}

	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}