Example #1
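Initializes a page allocator over a raw memory region: binalign_bound aligns the allocator header to 16 bytes and rounds the start of the page area up to the page size.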
struct page_allocator *page_allocator_init(char *start, size_t len, size_t page_size) {
	char *pages_start;
	struct page_allocator *allocator;
	unsigned int pages;
	size_t bitmap_len;

	if (len < page_size) {
		return NULL;
	}

	/* Place the allocator header at the 16-byte-aligned start of the region
	 * and the first page at the next page boundary above it. */
	start = (char *) binalign_bound((uintptr_t) start, 16);
	pages = len / page_size;
	pages_start = (char *) binalign_bound((uintptr_t) start, page_size);

	bitmap_len = sizeof(unsigned long) * BITMAP_SIZE(pages + 1); /* one extra bit for the guard bit */

	/* Push the page area up until the header and the bitmap fit below it. */
	while (sizeof(struct page_allocator) + bitmap_len > pages_start - start) {
		pages_start += page_size;
		pages--;
		assert(pages > 0);
	}

	allocator = (struct page_allocator *) start;
	allocator->pages_start = pages_start;
	allocator->pages_n = pages;
	allocator->page_size = page_size;
	allocator->free = pages * page_size;
	allocator->bitmap_len = bitmap_len;
	/* The bitmap array is placed immediately after the allocator header,
	 * just past the bitmap pointer itself. */
	allocator->bitmap = (unsigned long *) ((uintptr_t) &allocator->bitmap + sizeof(allocator->bitmap));

	memset(allocator->bitmap, 0, bitmap_len);
	bitmap_set_bit(allocator->bitmap, pages); /* set the guard bit */

	return allocator;
}
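Every example on this page rounds an address or a length up to an alignment boundary with binalign_bound. The sketch below models the behavior the examples rely on, assuming the boundary is a power of two; the name my_binalign_bound and its body are illustrative, not the library's actual definition.

#include <assert.h>
#include <stdint.h>

/* Sketch only: round x up to the next multiple of bound, where bound is
 * assumed to be a power of two. Aligned values are returned unchanged. */
static inline uintptr_t my_binalign_bound(uintptr_t x, uintptr_t bound) {
	return (x + bound - 1) & ~(bound - 1);
}

int main(void) {
	assert(my_binalign_bound(0x1001, 16) == 0x1010);     /* address rounded up */
	assert(my_binalign_bound(0x2000, 0x1000) == 0x2000); /* already page-aligned */
	assert(my_binalign_bound(100, 8) == 104);            /* length padded to 8 bytes */
	return 0;
}

This is why the examples can pass the result straight to page- or pointer-granular consumers: an already aligned input comes back unchanged, and the result never moves down.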
Example #2
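Probes a Bochs-compatible framebuffer on a PCI device: binalign_bound rounds the framebuffer length up to a whole number of pages before the device memory is mapped.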
static int bochs_init(struct pci_slot_dev *pci_dev) {
	int ret;
	struct fb_info *info;
	size_t mmap_len;

	assert(pci_dev != NULL);

	info = fb_alloc();
	if (info == NULL) {
		return -ENOMEM;
	}

	memcpy(&info->fix, &bochs_fix_screeninfo, sizeof info->fix);
	fill_var(&info->var);

	info->ops = &bochs_ops;
	info->screen_base = (void *)(pci_dev->bar[0] & ~0xf); /* FIXME */
	mmap_len = binalign_bound(VBE_DISPI_MAX_XRES * VBE_DISPI_MAX_YRES * VBE_DISPI_MAX_BPP / 8, PAGE_SIZE());

	if (MAP_FAILED == mmap_device_memory(info->screen_base,
				mmap_len,
				PROT_READ | PROT_WRITE | PROT_NOCACHE,
				MAP_FIXED,
				(unsigned long) info->screen_base)) {
		fb_release(info); /* don't leak the fb_info allocated above */
		return -EIO;
	}

	ret = fb_register(info);
	if (ret != 0) {
		fb_release(info);
		return ret;
	}

	return 0;
}
Example #3
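Initializes an NS16550 UART by mapping the page that contains its registers; binalign_bound pads the register block size to a whole MMU page.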
static int ns16550_init(void) {
	/* Map one vmem page to handle this device if the MMU is used */
	mmap_device_memory(
			(void*) (COM_BASE & ~MMU_PAGE_MASK),
			PROT_READ | PROT_WRITE | PROT_NOCACHE,
			binalign_bound(sizeof (struct com), MMU_PAGE_SIZE),
			MAP_FIXED,
			COM_BASE & ~MMU_PAGE_MASK
			);
	return 0;
}
Example #4
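A reentrant group-file reader: binalign_bound aligns the cursor in the caller's buffer to pointer size before the gr_mem pointer vector is built there.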
int fgetgrent_r(FILE *fp, struct group *gbuf, char *tbuf,
		size_t buflen, struct group **gbufp) {
	int res;
	char *buf = tbuf;
	size_t buf_len = buflen;
	char *ch, **pmem;

	*gbufp = NULL;

	if (0 != (res = read_field(fp, &buf, &buf_len, &gbuf->gr_name, ':'))) {
		return res;
	}

	if (0 != (res = read_field(fp, &buf, &buf_len, &gbuf->gr_passwd, ':'))) {
		return res;
	}

	if (0 != (res = read_int_field(fp, "%hd", &gbuf->gr_gid, ':'))) {
		return res;
	}

	if (0 != (res = read_field(fp, &buf, &buf_len, &ch, '\n'))) {
		return res;
	}

	/* Build the gr_mem pointer vector in the remaining buffer space,
	 * starting at the next pointer-aligned address. */
	gbuf->gr_mem = pmem = (char **) binalign_bound((uintptr_t) buf, sizeof(void *));
	buf_len -= (uintptr_t) pmem - (uintptr_t) buf;

	*pmem = ch;

	/* Split the comma-separated member list in place, recording a pointer
	 * to each member name. */
	while (NULL != (ch = strchr(ch, ','))) {
		if (buf_len < sizeof(char *)) {
			return -ERANGE;
		}

		buf_len -= sizeof(char *);

		*ch++ = '\0';
		*(++pmem) = ch;
	}

	if (buf_len < sizeof(char *)) {
		return -ERANGE;
	}
	*(++pmem) = NULL; /* terminate the member vector */

	*gbufp = gbuf;

	return 0;
}
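A hedged usage sketch for the reader above; the path, buffer size, and output format are illustrative assumptions, and fgetgrent_r is assumed to be declared in <grp.h>. Per the code above, a return of 0 means one entry was parsed into gbuf.

#include <grp.h>
#include <stdio.h>

static void dump_groups(const char *path) {
	struct group gbuf, *res;
	char tbuf[256]; /* holds the field strings plus the gr_mem pointer vector */
	FILE *fp = fopen(path, "r");

	if (fp == NULL) {
		return;
	}
	/* Each successful call leaves res pointing at gbuf */
	while (0 == fgetgrent_r(fp, &gbuf, tbuf, sizeof(tbuf), &res)) {
		printf("%s:%d\n", res->gr_name, (int) res->gr_gid);
	}
	fclose(fp);
}

Note that tbuf must outlive any use of the returned entry, since gr_name and the gr_mem vector point into it.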
Example #5
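Initializes an OMAP3 clock source by mapping the GPTIMER1 register page, again padding the register block size to a whole MMU page.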
static int omap_clk_init(void) {
	/* Map one vmem page to handle this device if the MMU is used */
	mmap_device_memory(
			(void*) ((uintptr_t) GPTIMER1_BASE & ~MMU_PAGE_MASK),
			PROT_READ | PROT_WRITE | PROT_NOCACHE,
			binalign_bound(sizeof(struct gptimerxx_x), MMU_PAGE_SIZE),
			VMEM_PAGE_WRITABLE,
			((uintptr_t) GPTIMER1_BASE & ~MMU_PAGE_MASK)
			);
	clock_source_register(&omap3_clk_clock_source);
	return irq_attach(GPTIMER1_IRQ, clock_handler, 0, &omap3_clk_clock_source, "omap3_clk");
}
Example #6
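Initializes the OMAP35x interrupt controller; here the mapped length is the span of the ILR register file, rounded up to a page boundary.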
static int omap3_intc_init(void) {
	/* Map one vmem page to handle this device if the MMU is used */
	mmap_device_memory(
			(void*) (OMAP35X_INTC_BASE & ~MMU_PAGE_MASK),
			PROT_READ | PROT_WRITE | PROT_NOCACHE,
			binalign_bound(
				OMAP35X_INTC_ILR(__IRQCTRL_IRQS_TOTAL) - OMAP35X_INTC_BASE,
				MMU_PAGE_SIZE), /* align the length to the page size, not the mask */
			MAP_FIXED,
			OMAP35X_INTC_BASE & ~MMU_PAGE_MASK
			);

	return 0;
}
Example #7
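Initializes the BCM2835 (Raspberry Pi) system timer, following the same map-one-page pattern as the UART and timer examples above.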
static int this_init(void) {
	/* Map one vmem page to handle this device if the MMU is used */
	mmap_device_memory(
			(void*) ((uintptr_t) BCM2835_SYSTEM_TIMER_BASE & ~MMU_PAGE_MASK),
			PROT_READ | PROT_WRITE | PROT_NOCACHE,
			binalign_bound(sizeof(struct raspi_timer_regs), MMU_PAGE_SIZE),
			MAP_FIXED,
			((uintptr_t) BCM2835_SYSTEM_TIMER_BASE & ~MMU_PAGE_MASK)
			);

	clock_source_register(&this_clock_source);
	return irq_attach(SYSTICK_IRQ, clock_handler, 0, &this_clock_source,
			"Raspberry PI systick timer"); /* propagate attach errors */
}
Example #8
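A variant of the Bochs framebuffer probe in which the mapping is established before the fb_info object is created.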
static int bochs_init(struct pci_slot_dev *pci_dev) {
	char *mmap_base = (char *)(pci_dev->bar[0] & ~0xf); /* FIXME */
	size_t mmap_len = binalign_bound(VBE_DISPI_MAX_XRES
			* VBE_DISPI_MAX_YRES
			* VBE_DISPI_MAX_BPP / 8, PAGE_SIZE());
	struct fb_info *info;

	if (MAP_FAILED == mmap_device_memory(mmap_base,
				mmap_len,
				PROT_READ | PROT_WRITE | PROT_NOCACHE,
				MAP_FIXED,
				(unsigned long) mmap_base)) {
		return -EIO;
	}

	info = fb_create(&bochs_ops, mmap_base, mmap_len);
	if (info == NULL) {
		munmap(mmap_base, mmap_len);
		return -ENOMEM;
	}

	return 0;
}
Example #9
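Unmaps a virtual memory region by walking the page tables; binalign_bound advances virt_addr to the next PGD or PMD boundary when a whole table is absent.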
void vmem_unmap_region(mmu_ctx_t ctx, mmu_vaddr_t virt_addr, size_t reg_size, int free_pages) {
	mmu_pgd_t *pgd;
	mmu_pmd_t *pmd;
	mmu_pte_t *pte;
	mmu_vaddr_t v_end = virt_addr + reg_size; /* end of the virtual region */
	size_t pgd_idx, pmd_idx, pte_idx;
	void *addr;

	/* All boundaries are assumed to be page-aligned */
	assert(!(virt_addr & MMU_PAGE_MASK));
	assert(!(reg_size  & MMU_PAGE_MASK));

	pgd = mmu_get_root(ctx);

	vmem_get_idx_from_vaddr(virt_addr, &pgd_idx, &pmd_idx, &pte_idx);

	for ( ; pgd_idx < MMU_PGD_ENTRIES; pgd_idx++) {
		if (!mmu_pgd_present(pgd + pgd_idx)) {
			virt_addr = binalign_bound(virt_addr, MMU_PGD_SIZE);
			pte_idx = pmd_idx = 0;
			continue;
		}

		pmd = mmu_pgd_value(pgd + pgd_idx);

		for ( ; pmd_idx < MMU_PMD_ENTRIES; pmd_idx++) {
			if (!mmu_pmd_present(pmd + pmd_idx)) {
				virt_addr = binalign_bound(virt_addr, MMU_PMD_SIZE);
				pte_idx = 0;
				continue;
			}

			pte = mmu_pmd_value(pmd + pmd_idx);

			for ( ; pte_idx < MMU_PTE_ENTRIES; pte_idx++) {
				if (virt_addr >= v_end) {
					/* Reached the end of the region: try to free pte, pmd, pgd */
					if (try_free_pte(pte, pmd + pmd_idx) && try_free_pmd(pmd, pgd + pgd_idx)) {
						try_free_pgd(pgd, ctx);
					}

					mmu_flush_tlb();
					return;
				}

				if (mmu_pte_present(pte + pte_idx)) {
					if (free_pages) { /* presence checked just above */
						addr = (void *) mmu_pte_value(pte + pte_idx);
						vmem_free_page(addr);
					}

					mmu_pte_unset(pte + pte_idx);
				}

				virt_addr += VMEM_PAGE_SIZE;
			}
			try_free_pte(pte, pmd + pmd_idx);
			pte_idx = 0;
		}

		try_free_pmd(pmd, pgd + pgd_idx);
		pmd_idx = 0;
	}

	try_free_pgd(pgd, ctx);

	mmu_flush_tlb();
}