Example #1
int
io_apic_add(unsigned id, uintphys_t address, unsigned base)
{
	struct io_apic* io_apic;
	uintptr_t page;
	unsigned i;
	int result;
	uint32_t version;

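	/* Acquire resources in order: a descriptor from the pool, a free
	 * virtual page, and a mapping of the controller's MMIO registers
	 * into that page. */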
	result = kpool_get(&io_apic_pool, (void**)&io_apic);
	if (result)
		goto fail;
	result = vmem_alloc_page(&page, 0);
	if (result)
		goto fail_pool;
	result = paging_map_page(page, address, 0);
	if (result)
		goto fail_vmem;

	io_apic->id = id;
	io_apic->irq_base = base;
	io_apic->mmio = (volatile uint32_t*)page;

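	/* Bits 16-23 of IOAPICVER hold the index of the highest redirection
	 * entry, so the number of IRQ inputs is that value plus one. */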
	version = io_apic_in(io_apic, IOAPICVER);
	io_apic->irq_count = ((version >> 16) & 0xff) + 1;
	trace();
	for (i = io_apic->irq_base; i < io_apic->irq_base + io_apic->irq_count;
		i++) {
		if (i >= MAX_IRQ_COUNT || irq_ctrl[i]) {
			/* IRQ line out of range or already claimed; E_BUSY is
			 * assumed to exist alongside E_OK. */
			result = E_BUSY;
			goto fail_paging;
		}
		irq_ctrl[i] = io_apic;
	}

	return E_OK;

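	/* Unwind in reverse order of acquisition. */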
fail_paging:
	/* Release any IRQ slots claimed before the conflict, so no stale
	 * pointer to the freed descriptor remains. */
	while (i-- > io_apic->irq_base)
		irq_ctrl[i] = NULL;
	paging_unmap_page(page, NULL);
fail_vmem:
	vmem_free_page(page);
fail_pool:
	kpool_put(&io_apic_pool, io_apic);
fail:
	return result;
}
Example #2
void vmem_unmap_region(mmu_ctx_t ctx, mmu_vaddr_t virt_addr, size_t reg_size, int free_pages) {
	mmu_pgd_t *pgd;
	mmu_pmd_t *pmd;
	mmu_pte_t *pte;
	mmu_vaddr_t v_end = virt_addr + reg_size;
	size_t pgd_idx, pmd_idx, pte_idx;
	void *addr;

	/* All boundaries are assumed to be page-aligned. */
	assert(!(virt_addr & MMU_PAGE_MASK));
	assert(!(reg_size  & MMU_PAGE_MASK));

	pgd = mmu_get_root(ctx);

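	/* Translate the starting address into PGD/PMD/PTE indices, then
	 * resume the three-level walk from exactly that point. */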
	vmem_get_idx_from_vaddr(virt_addr, &pgd_idx, &pmd_idx, &pte_idx);

	for ( ; pgd_idx < MMU_PGD_ENTRIES; pgd_idx++) {
		if (!mmu_pgd_present(pgd + pgd_idx)) {
			/* Skip the whole absent PGD region; the '+ 1' keeps
			 * virt_addr advancing even when it is already PGD-aligned,
			 * so it stays in step with pgd_idx. */
			virt_addr = binalign_bound(virt_addr + 1, MMU_PGD_SIZE);
			pte_idx = pmd_idx = 0;
			continue;
		}

		pmd = mmu_pgd_value(pgd + pgd_idx);

		for ( ; pmd_idx < MMU_PMD_ENTRIES; pmd_idx++) {
			if (!mmu_pmd_present(pmd + pmd_idx)) {
				/* Same at PMD level: advance past the absent region. */
				virt_addr = binalign_bound(virt_addr + 1, MMU_PMD_SIZE);
				pte_idx = 0;
				continue;
			}

			pte = mmu_pmd_value(pmd + pmd_idx);

			for ( ; pte_idx < MMU_PTE_ENTRIES; pte_idx++) {
				if (virt_addr >= v_end) {
					/* End of the region: release now-empty table
					 * levels bottom-up (PTE, then PMD, then PGD). */
					if (try_free_pte(pte, pmd + pmd_idx)
							&& try_free_pmd(pmd, pgd + pgd_idx)) {
						try_free_pgd(pgd, ctx);
					}

					mmu_flush_tlb();
					return;
				}

				if (mmu_pte_present(pte + pte_idx)) {
					if (free_pages) {
						addr = (void *) mmu_pte_value(pte + pte_idx);
						vmem_free_page(addr);
					}

					mmu_pte_unset(pte + pte_idx);
				}

				virt_addr += VMEM_PAGE_SIZE;
			}
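			/* PTE table fully scanned: release it if it is now empty
			 * and restart the PTE index for the next PMD entry. */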
			try_free_pte(pte, pmd + pmd_idx);
			pte_idx = 0;
		}

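		/* Likewise release the PMD table once its entries are done. */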
		try_free_pmd(pmd, pgd + pgd_idx);
		pmd_idx = 0;
	}

	try_free_pgd(pgd, ctx);

	mmu_flush_tlb();
}