Example #1

void mmap_add_region(unsigned long base_pa, unsigned long base_va,
			unsigned long size, unsigned attr)
{
	mmap_region_t *mm = mmap;
	mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
	unsigned long pa_end = base_pa + size - 1;
	unsigned long va_end = base_va + size - 1;

	assert(IS_PAGE_ALIGNED(base_pa));
	assert(IS_PAGE_ALIGNED(base_va));
	assert(IS_PAGE_ALIGNED(size));

	if (!size)
		return;

	/* Find correct place in mmap to insert new region */
	while (mm->base_va < base_va && mm->size)
		++mm;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0);

	mm->base_pa = base_pa;
	mm->base_va = base_va;
	mm->size = size;
	mm->attr = attr;

	if (pa_end > max_pa)
		max_pa = pa_end;
	if (va_end > max_va)
		max_va = va_end;
}
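
None of the examples on this page define IS_PAGE_ALIGNED itself. A minimal sketch of the usual definition, assuming a power-of-two 4 KiB page size (names and values here are illustrative, not taken from any of the projects below):

#include <stdint.h>

#define PAGE_SHIFT 12                         /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Aligned exactly when every page-offset bit is zero; works for
 * pointers and integers alike thanks to the uintptr_t cast. */
#define IS_PAGE_ALIGNED(x) ((((uintptr_t)(x)) & (PAGE_SIZE - 1)) == 0)
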
Example #2

static VOID
DumpPortWrite(
    IN  ULONG64         Offset,
    IN  PVOID           Buffer,
    IN  ULONG           Length
    )
{
    PHYSICAL_ADDRESS    Address;

    ASSERT(Offset == (ULONG64)-1);
    ASSERT(IS_PAGE_ALIGNED(Buffer));
    ASSERT(IS_PAGE_ALIGNED(Length));

    //
    // Sometimes Windows passes us virtual addresses, sometimes it passes
    // physical addresses. It doesn't tell us which it's handing us, and
    // how this plays with PAE is anybody's guess.
    //
    Address = MmGetPhysicalAddress(Buffer);
    if (Address.QuadPart == 0)
        Address.QuadPart = (ULONG_PTR)Buffer;

    Address.QuadPart >>= PAGE_SHIFT;
    ASSERT3U(Address.HighPart, ==, 0);

    for (Length >>= PAGE_SHIFT; Length != 0; Length--)
        WRITE_PORT_ULONG(PortEC, Address.LowPart++);
}
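
Example #2 writes page frame numbers, not byte addresses, to the port: since Buffer is asserted page-aligned, shifting by PAGE_SHIFT loses no information. A small sketch of that round-trip, assuming 4 KiB pages:

#define PAGE_SHIFT 12  /* assumed */

/* For a page-aligned physical address the low PAGE_SHIFT bits are zero,
 * so pfn_to_addr(addr_to_pfn(pa)) == pa holds exactly. */
static unsigned long long addr_to_pfn(unsigned long long pa)  { return pa >> PAGE_SHIFT; }
static unsigned long long pfn_to_addr(unsigned long long pfn) { return pfn << PAGE_SHIFT; }
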
Example #3

void mmap_add_region(unsigned long base, unsigned long size, unsigned attr)
{
	mmap_region_t *mm = mmap;
	mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;

	assert(IS_PAGE_ALIGNED(base));
	assert(IS_PAGE_ALIGNED(size));

	if (!size)
		return;

	/* Find correct place in mmap to insert new region */
	while (mm->base < base && mm->size)
		++mm;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0);

	mm->base = base;
	mm->size = size;
	mm->attr = attr;
}
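
Examples #1 and #3 both assume a statically sized region array terminated by an all-zero sentinel entry. A hypothetical declaration matching that invariant (the capacity and field layout are illustrative only):

typedef struct {
	unsigned long base;
	unsigned long size;   /* size == 0 marks the sentinel */
	unsigned attr;
} mmap_region_t;

/* One spare slot guarantees the memmove in mmap_add_region can shift
 * every live entry up one place without overwriting the sentinel. */
static mmap_region_t mmap[16 + 1];
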
Example #4

/**
 * @brief Attempt to allocate the n physical pages starting at address addr.
 *
 * @param addr The physical address of the first page to reserve.
 * @param n The number of pages to allocate.
 */
struct page *alloc_pages_at(size_t addr, unsigned long n)
{
	TRACE("addr=0x%x, n=0x%x", addr, n);

	ASSERT(IS_PAGE_ALIGNED(addr));
	ASSERT_EQUALS(zone_containing(addr), zone_containing(addr + PAGE_SIZE * n));

	return __alloc_pages_at(addr, n, zone_containing(addr));
}
Example #5

uintptr_t _EREMOVE(const void *epc_lin_addr)
{
    CEnclaveMngr *mngr = CEnclaveMngr::get_instance();
    CEnclaveSim *ce = mngr->get_enclave(epc_lin_addr);

    GP_ON(!ce);
    GP_ON(!IS_PAGE_ALIGNED(epc_lin_addr));

    return ce->remove_page(epc_lin_addr) ? 0 : -1;
}
Example #6

void core_init_mmu_tables(struct tee_mmap_region *mm)
{
	paddr_t max_pa = 0;
	uint64_t max_va = 0;
	size_t n;

	for (n = 0; mm[n].size; n++) {
		paddr_t pa_end;
		vaddr_t va_end;

		debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
			    mm[n].va, mm[n].pa, mm[n].size, mm[n].attr);

		assert(IS_PAGE_ALIGNED(mm[n].pa));
		assert(IS_PAGE_ALIGNED(mm[n].size));

		pa_end = mm[n].pa + mm[n].size - 1;
		va_end = mm[n].va + mm[n].size - 1;
		if (pa_end > max_pa)
			max_pa = pa_end;
		if (va_end > max_va)
			max_va = va_end;
	}

	/* Clear table before use */
	memset(l1_xlation_table[0], 0, NUM_L1_ENTRIES * XLAT_ENTRY_SIZE);
	init_xlation_table(mm, 0, l1_xlation_table[0], 1);
	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
		memcpy(l1_xlation_table[n], l1_xlation_table[0],
			XLAT_ENTRY_SIZE * NUM_L1_ENTRIES);

	for (n = 0; n < NUM_L1_ENTRIES; n++) {
		if (!l1_xlation_table[0][n]) {
			user_va_idx = n;
			break;
		}
	}
	assert(user_va_idx != -1);

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
	COMPILE_TIME_ASSERT(ADDR_SPACE_SIZE > 0);
	assert(max_va < ADDR_SPACE_SIZE);
}
Example #7

void page_free(void *ptr, size_t pages)
{
#if WITH_KERNEL_VM
    DEBUG_ASSERT(IS_PAGE_ALIGNED((uintptr_t)ptr));

    pmm_free_kpages(ptr, pages);
#else
    novm_free_pages(ptr, pages);
#endif
}
Example #8

/*
 * rpmem_fip_init_memory -- (internal) initialize common memory resources
 */
static int
rpmem_fip_init_memory(struct rpmem_fip *fip)
{
	ASSERTne(Pagesize, 0);
	int ret;

	/*
	 * Register local memory space. The local memory will be used
	 * with WRITE operation in rpmem_fip_persist function thus
	 * the FI_WRITE access flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->laddr, fip->size,
			FI_WRITE, 0, 0, 0, &fip->mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registrating memory");
		return ret;
	}

	/* get local memory descriptor */
	fip->mr_desc = fi_mr_desc(fip->mr);

	/* allocate buffer for read operation */
	ASSERT(IS_PAGE_ALIGNED(RPMEM_RD_BUFF_SIZE));
	errno = posix_memalign((void **)&fip->rd_buff, Pagesize,
			RPMEM_RD_BUFF_SIZE);
	if (errno) {
		RPMEM_LOG(ERR, "!allocating read buffer");
		ret = -1;
		goto err_malloc_rd_buff;
	}

	/*
	 * Register buffer for read operation.
	 * The read operation utilizes READ operation thus
	 * the FI_REMOTE_WRITE flag.
	 */
	ret = fi_mr_reg(fip->domain, fip->rd_buff,
			RPMEM_RD_BUFF_SIZE, FI_REMOTE_WRITE,
			0, 0, 0, &fip->rd_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registrating read buffer");
		goto err_rd_mr;
	}

	/* get read buffer local memory descriptor */
	fip->rd_mr_desc = fi_mr_desc(fip->rd_mr);

	return 0;
err_rd_mr:
	free(fip->rd_buff);
err_malloc_rd_buff:
	RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
	return ret;
}
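
The idiom in Example #8 (assert a compile-time buffer size is page-aligned, then posix_memalign with Pagesize) makes both the base and the end of the buffer fall on page boundaries. A standalone sketch of the same idiom; the 4-page size is arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
	void *buf;

	/* Unlike malloc, posix_memalign returns the error code directly;
	 * Example #8 stores it into errno by hand for its logging macro. */
	int err = posix_memalign(&buf, pagesize, 4 * pagesize);
	if (err) {
		fprintf(stderr, "posix_memalign: %s\n", strerror(err));
		return 1;
	}
	/* The size is a whole number of pages, so base and end are both
	 * page-aligned. */
	printf("base aligned: %d\n", ((uintptr_t)buf & (pagesize - 1)) == 0);
	free(buf);
	return 0;
}
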
Example #9
File: mmu.c Project: chenyuwen/lk

static status_t get_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t *ppa)
{
    status_t ret;
    paddr_t pa;
    uint32_t tt_entry;

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(ppa);

    /* lookup an existing l2 pagetable */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        tt_entry = aspace->tt_virt[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            *ppa = (paddr_t)ROUNDDOWN(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry), PAGE_SIZE)
                   + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
            return NO_ERROR;
        }
    }

    /* not found: allocate it */
    uint32_t *l2_va = pmm_alloc_kpages(1, &aspace->pt_page_list);
    if (!l2_va)
        return ERR_NO_MEMORY;

    /* wipe it clean to set no access */
    memset(l2_va, 0, PAGE_SIZE);

    /* get physical address */
    ret = arm_vtop((vaddr_t)l2_va, &pa);
    ASSERT(!ret);
    ASSERT(paddr_to_kvaddr(pa));

    DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_va));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

    *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));

    LTRACEF("allocated pagetable at %p, pa 0x%lx, pa 0x%lx\n", l2_va, pa, *ppa);
    return NO_ERROR;
}
Example #10

zx_status_t Guest::SetTrap(uint32_t kind, zx_vaddr_t addr, size_t len,
                           fbl::RefPtr<PortDispatcher> port, uint64_t key) {
    if (len == 0) {
        return ZX_ERR_INVALID_ARGS;
    } else if (SIZE_MAX - len < addr) {
        return ZX_ERR_OUT_OF_RANGE;
    }

    switch (kind) {
    case ZX_GUEST_TRAP_MEM:
        if (port) {
            return ZX_ERR_INVALID_ARGS;
        }
        break;
    case ZX_GUEST_TRAP_BELL:
        if (!port) {
            return ZX_ERR_INVALID_ARGS;
        }
        break;
    case ZX_GUEST_TRAP_IO:
        if (port) {
            return ZX_ERR_INVALID_ARGS;
        } else if (addr + len > UINT16_MAX) {
            return ZX_ERR_OUT_OF_RANGE;
        }
        return traps_.InsertTrap(kind, addr, len, ktl::move(port), key);
    default:
        return ZX_ERR_INVALID_ARGS;
    }

    // Common logic for memory-based traps.
    if (!IS_PAGE_ALIGNED(addr) || !IS_PAGE_ALIGNED(len)) {
        return ZX_ERR_INVALID_ARGS;
    }
    zx_status_t status = gpas_->UnmapRange(addr, len);
    if (status != ZX_OK) {
        return status;
    }
    return traps_.InsertTrap(kind, addr, len, ktl::move(port), key);
}
Example #11

// TODO: Should a lock be used to access kernel_pmap?
void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t vm_prot, pmap_flags_t pmap_flags) {
  // The mapping must be in the kernel virtual address space
  kassert(va >= (uintptr_t)(&__kernel_virtual_start) && IS_PAGE_ALIGNED(pa) && IS_PAGE_ALIGNED(va));

  // Encode the protection and pmap flags in the page table entry
  pte_t entry = PTE_CREATE(pa, PTE_AP0_BIT | PTE_S_BIT | PTE_ENCODE_PROTECTION(vm_prot, pmap_kernel()) | PTE_ENCODE_PMAP_FLAGS(pmap_flags));

  // Now we must place the page table entry in the correct kernel page table
  // Since we know that the pgts are laid out contiguously in memory we can cheat by
  // accessing the correct pgt directly without having to loop over the pmap_kernel's pgt_entries
  // to search for the pgt_entry with the correct offset in the pgd.

  // Place the entry in the page table if one doesn't already exist
  pte_t existing_entry = KERNEL_PGTS_BASE[PGD_GET_INDEX(va)-KERNEL_PGD_PGT_INDEX_BASE].pte[PGT_GET_INDEX(va)];
  kassert(!(existing_entry & PTE_PAGE_BIT));
  KERNEL_PGTS_BASE[PGD_GET_INDEX(va)-KERNEL_PGD_PGT_INDEX_BASE].pte[PGT_GET_INDEX(va)] = entry;

  // Update the stats
  pmap_kernel()->pmap_stats.wired_count++;
  pmap_kernel()->pmap_stats.resident_count++;
}
Example #12

// TODO: Implement PMAP_CANFAIL logic
uint32_t pmap_enter(pmap_t *pmap, vaddr_t va, paddr_t pa, vm_prot_t vm_prot, pmap_flags_t pmap_flags) {
  // Must have a valid pmap
  kassert(pmap != NULL && pmap->pgd != NULL && IS_WITHIN_BOUNDS(pa) && IS_PAGE_ALIGNED(pa) && IS_PAGE_ALIGNED(va));

  // Encode the protection and pmap flags in the page table entry
  pte_t entry = PTE_CREATE(pa, PTE_AP0_BIT | PTE_S_BIT | PTE_ENCODE_PROTECTION(vm_prot, pmap_kernel()) | PTE_ENCODE_PMAP_FLAGS(pmap_flags));

  // First check if the page table for the given va exists within the page directory. If not create the page table
  uint32_t pgd_index = PGD_GET_INDEX(va);
  if(!PDE_PGT_EXISTS(pmap->pgd->pde[pgd_index])) {
    // TODO: To get pa of pgt -> TRUNC_PAGE(pgt) and search kernel pgd & pgt for entry
  }


  kassert(entry != 0);
  return 0;
}
Example #13

uint32_t mesh_flash_op_push(flash_op_type_t type, const flash_op_t* p_op)
{
    if (mp_cb == NULL)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    if (p_op == NULL)
    {
        return NRF_ERROR_NULL;
    }
    if (type == FLASH_OP_TYPE_WRITE)
    {
        if (!IS_WORD_ALIGNED(p_op->write.start_addr) ||
            !IS_WORD_ALIGNED(p_op->write.p_data))
        {
            return NRF_ERROR_INVALID_ADDR;
        }
        if (!IS_WORD_ALIGNED(p_op->write.length) ||
            p_op->write.length == 0)
        {
            return NRF_ERROR_INVALID_LENGTH;
        }
    }
    else if (type == FLASH_OP_TYPE_ERASE)
    {
        if (!IS_PAGE_ALIGNED(p_op->erase.start_addr))
        {
            return NRF_ERROR_INVALID_ADDR;
        }
        if (p_op->erase.length == 0)
        {
            return NRF_ERROR_INVALID_LENGTH;
        }
    }
    else
    {
        return NRF_ERROR_INVALID_PARAM;
    }

    operation_t op;
    op.type = type;
    memcpy(&op.operation, p_op, sizeof(flash_op_t));
    return fifo_push(&m_flash_op_fifo, &op);
}
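
Example #13 checks word alignment for flash writes but page alignment for erases, since erases operate on whole pages. A plausible companion macro, assuming 4-byte words (an assumption, not taken from the Nordic source):

#include <stdint.h>

#define WORD_SIZE 4  /* assumed 32-bit words */
#define IS_WORD_ALIGNED(x) ((((uintptr_t)(x)) & (WORD_SIZE - 1)) == 0)
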
Example #14

uintptr_t _EADD(page_info_t* pi, void *epc_lin_addr)
{
    void     *src_page = pi->src_page;
    CEnclaveMngr *mngr = CEnclaveMngr::get_instance();
    CEnclaveSim    *ce = mngr->get_enclave(pi->lin_addr);

    if (ce == NULL) {
        SE_TRACE(SE_TRACE_DEBUG, "failed to get enclave instance\n");
        return SGX_ERROR_UNEXPECTED;
    }

    GP_ON(!IS_PAGE_ALIGNED(epc_lin_addr));
    GP_ON((ce->get_secs()->attributes.flags & SGX_FLAGS_INITTED) != 0);

    // Make the page writable before doing memcpy()
    se_virtual_protect(epc_lin_addr, SE_PAGE_SIZE, SI_FLAGS_RW);

    mcp_same_size(epc_lin_addr, src_page, SE_PAGE_SIZE);

    se_virtual_protect(epc_lin_addr, SE_PAGE_SIZE, (uint32_t)pi->sec_info->flags);

    GP_ON(!ce->add_page(pi->lin_addr, pi->sec_info->flags));
    return SGX_SUCCESS;
}
Example #15

int xio_validate_rdma_op(struct xio_sge *lsg_list, size_t lsize,
			 struct xio_sge *rsg_list, size_t rsize,
			 int op_size)
{
	int		l	= 0,
			r	= 0;
	uint64_t	laddr	= lsg_list[0].addr;
	uint64_t	raddr	= rsg_list[0].addr;
	uint32_t	llen	= lsg_list[0].length;
	uint32_t	rlen	= rsg_list[0].length;
	uint32_t	tot_len = 0;

	if (lsize < 1 || rsize < 1) {
		ERROR_LOG("iovec size < 1 lsize:%d, rsize:%d\n", lsize, rsize);
		return -1;
	}

	while (1) {
		if (rlen < llen) {
			r++;
			tot_len	+= rlen;
			if (r == rsize)
				break;
			llen	-= rlen;
			laddr	+= rlen;
			raddr	= rsg_list[r].addr;
			rlen	= rsg_list[r].length;
		} else if (llen < rlen) {
			/* check page alignment when source buff spans more
			 * than one destination buffer */
			if (!IS_PAGE_ALIGNED(
				    lsg_list[l].addr + lsg_list[l].length))
				return -1;
			l++;
			tot_len	+= llen;
			if (l == lsize)
				break;
			rlen	-= llen;
			raddr	+= llen;
			laddr	= lsg_list[l].addr;
			llen	= lsg_list[l].length;

			if (!IS_PAGE_ALIGNED(lsg_list[l].addr))
				return -1;
		} else {
			l++;
			r++;
			tot_len	+= llen;
			if ((l == lsize) || (r == rsize))
				break;

			laddr	= lsg_list[l].addr;
			llen	= lsg_list[l].length;
			raddr	= rsg_list[r].addr;
			rlen	= rsg_list[r].length;
		}
	}

	/* not enough buffers to complete */
	if (tot_len < op_size) {
		ERROR_LOG("iovec exausted\n");
		return -1;
	}

	return 0;
}
Example #16
File: mmu.c Project: chenyuwen/lk

int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count, uint flags)
{
    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & ARCH_MMU_FLAG_NS) {
        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
        panic("NS mem is not supported\n");
    }
#endif

    /* paddr and vaddr must be aligned */
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    /* see what kind of mapping we can use */
    int mapped = 0;
    while (count > 0) {
        if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) {
            /* we can use a section */

            /* compute the arch flags for L1 sections */
            uint arch_flags = mmu_flags_to_l1_arch_flags(flags) |
                              MMU_MEMORY_L1_DESCRIPTOR_SECTION;

            /* map it */
            arm_mmu_map_section(aspace, paddr, vaddr, arch_flags);
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
            vaddr += SECTION_SIZE;
            paddr += SECTION_SIZE;
        } else {
            /* will have to use a L2 mapping */
            uint l1_index = vaddr / SECTION_SIZE;
            uint32_t tt_entry = aspace->tt_virt[l1_index];

            LTRACEF("tt_entry 0x%x\n", tt_entry);
            switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
                case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                    // XXX will have to break L1 mapping into a L2 page table
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                    paddr_t l2_pa = 0;
                    if (get_l2_table(aspace, l1_index, &l2_pa) != NO_ERROR) {
                        TRACEF("failed to allocate pagetable\n");
                        goto done;
                    }
                    tt_entry = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE;
                    if (flags & ARCH_MMU_FLAG_NS)
                        tt_entry |= MMU_MEMORY_L1_PAGETABLE_NON_SECURE;

                    aspace->tt_virt[l1_index] = tt_entry;
                }
                    /* fallthrough */
                case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                    uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                    LTRACEF("l2_table at %p\n", l2_table);

                    DEBUG_ASSERT(l2_table);

                    // XXX handle 64K pages here

                    /* compute the arch flags for L2 4K pages */
                    uint arch_flags = mmu_flags_to_l2_arch_flags_small_page(flags);

                    uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                    do {
                        l2_table[l2_index++] = paddr | arch_flags;
                        count--;
                        mapped++;
                        vaddr += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                    } while (count && (l2_index != (SECTION_SIZE / PAGE_SIZE)));
                    break;
                }
                default:
                    PANIC_UNIMPLEMENTED;
            }
        }
    }

done:
    DSB;
    return mapped;
}
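
arch_mmu_map in Example #16 prefers 1 MiB L1 section mappings whenever the addresses and remaining page count allow it. A sketch of the section-alignment check it relies on, assuming ARMv7 short-descriptor 1 MiB sections:

#include <stdint.h>

#define SECTION_SIZE (1U << 20)  /* assumed: ARMv7 L1 sections cover 1 MiB */
#define IS_SECTION_ALIGNED(x) ((((uintptr_t)(x)) & (SECTION_SIZE - 1)) == 0)
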
Example #17

static int elf_load_int(const char *path, task_t *task, char *argv[], char *envp[]) {
	// Loads to a fixed address of 0x10000000 for now; not a HUGE deal
	// since each (user mode) task has its own address space

	assert(interrupts_enabled() == false); // TODO: get rid of the race condition from create_task, so that this isn't needed

	assert(task != NULL);
	struct task_mm *mm = task->mm;
	assert(mm != NULL);

	struct stat st;

	int r;
	if ((r = stat(path, &st)) != 0) {
		assert(r < 0);
		return r;
	}

	uint32 file_size = st.st_size;
	unsigned char *data = kmalloc(file_size);
	int retval = 0;

	int fd = open(path, O_RDONLY);
	if (fd < 0) {
		printk("elf_load(): unable to open %s\n", path);
		retval = fd;
		goto err;
	}

	if ((r = read(fd, data, file_size)) != (int)file_size) {
		printk("elf_load(): unable to read from %s; got %d bytes, requested %d\n", path, r, (int)file_size);
		if (r < 0) {
			retval = r;
			goto err;
		}
		else {
			panic("read() returned less than the expected file size, but not a negative value... why?");
			retval = -EIO;
			goto err;
		}
	}

	close(fd);

	elf_header_t *header = (elf_header_t *)data;

	const unsigned char ELF_IDENT[] = {0x7f, 'E', 'L', 'F'};

	if (memcmp(header->e_ident.ei_mag, &ELF_IDENT, 4) != 0) {
		printk("Warning: file %s is not an ELF file; aborting execution\n", path);
		retval = -ENOEXEC;
		goto err;
	}

	// TODO SECURITY: don't trust anything from the file - users can EASILY execute
	// "forged" ELF files!

	if (header->e_ident.ei_class != ELFCLASS32 || header->e_ident.ei_data != ELFDATA2LSB || \
		header->e_ident.ei_version != 1 || header->e_machine != EM_386 || header->e_type != ET_EXEC) {
		printk("Warning: file %s is not a valid ELF file (invalid ELFCLASS, ELFDATA, version, machine or not ET_EXEC\n");
		retval = -ENOEXEC;
		goto err;
	}

	assert(header->e_entry >= 0x10000000);
	assert(header->e_entry <  0x11000000);

	if (task == current_task) {
		// execve
		assert(current_task->mm != NULL);
		assert(current_task->mm->areas != NULL);
		assert(current_task->mm->page_directory != NULL);
	}

	for (int i=0; i < header->e_phnum; i++) {
		Elf32_Phdr *phdr = (Elf32_Phdr *)(data + header->e_phoff + header->e_phentsize * i);
		if (phdr->p_type == PT_LOAD) {
			// This is a segment to load!

			// Should this be writable to the task?
			bool writable = ((phdr->p_flags & PF_W) ? true : false);

#if ELF_DEBUG
			printk("Segment #%u: copy %u bytes from 0x%08x (data + offset) to 0x%08x (virt in task page dir); read%s\n",
					i, phdr->p_filesz, data + phdr->p_offset, phdr->p_vaddr, writable ? "-write" : "only");
#endif

			if (i == 0)
				assert(phdr->p_vaddr == 0x10000000);
			else
				assert(phdr->p_vaddr > 0x10000000);

			uint32 start_addr = phdr->p_vaddr;
			uint32 start_addr_aligned = (phdr->p_vaddr & 0xfffff000);
			uint32 end_addr   = start_addr + phdr->p_memsz;
			if (!IS_PAGE_ALIGNED(end_addr)) {
				end_addr &= ~(PAGE_SIZE - 1);
				end_addr += PAGE_SIZE;
			}

			if (end_addr > task->mm->brk_start) {
				uint32 new_brk = end_addr;
				if (!IS_PAGE_ALIGNED(new_brk)) {
					new_brk &= ~(PAGE_SIZE - 1);
					new_brk += PAGE_SIZE;
				}
				task->mm->brk_start = new_brk;
				task->mm->brk = new_brk;
			}

			// Allocate memory for this address in the task's address space, set for user mode
			vmm_alloc_user(start_addr_aligned, end_addr, mm, writable);

			// Switch to the new page directory, so that we can copy the data there
			page_directory_t *old_dir = current_directory;
			switch_page_directory(mm->page_directory);

			// Okay, we should have the memory. Let's clear it (since PARTS may be left empty by the memcpy,
			// e.g. the .bss section, and we do want zeroes to be there)
			memset((void *)start_addr_aligned, 0, end_addr - start_addr_aligned);

			// Copy the segment (e.g. .text + .rodata + .eh_frame, or .data + .bss) to the location
			// DO NOT use start_addr_aligned here - we want the program to dictate the exact location

			memcpy((void *)start_addr, data + phdr->p_offset, phdr->p_filesz);

			switch_page_directory(old_dir);
		}
		else if (phdr->p_type == PT_GNU_STACK || phdr->p_type == PT_GNU_RELRO || phdr->p_type == PT_GNU_EH_FRAME) {
			// Quietly ignore
		}
		else
			printk("Warning: skipping unsupported ELF program header (#%u, p_type = 0x%x)\n", i, phdr->p_type);
	}

	// Set up the reentrancy structure for Newlib
	// (It is initialized below, after switching to the new page directory.)
	uint32 reent_size = sizeof(struct _reent);
	if (reent_size & 0xfff) {
		reent_size &= 0xfffff000;
		reent_size += PAGE_SIZE;
	}

	vmm_alloc_user(task->mm->brk, task->mm->brk + reent_size, mm, PAGE_RW);

	//assert(current_directory == kernel_directory);
	page_directory_t *old_dir = current_directory;
	switch_page_directory(task->mm->page_directory);

	task->reent = (struct _reent *)task->mm->brk;
	_REENT_INIT_PTR(task->reent);
	task->mm->brk += reent_size;
	task->mm->brk_start += reent_size;

	assert(IS_PAGE_ALIGNED(task->mm->brk));
	assert(task->mm->brk == task->mm->brk_start);

	// The value brk has when the process starts;
	// userspace may not decrease the brk point below this address
	task->mm->initial_brk = task->mm->brk_start;

	// Copy the argv data from the kernel heap to the task's address space
	// This function updates argv to point to the new location.
	uint32 argc = 0;
	for (; argv[argc] != NULL; argc++) { }
	copy_argv_env_to_task(&argv, argc, task);

	uint32 envc = 0;
	assert(envp != NULL);
	for (; envp[envc] != NULL; envc++) { }
	copy_argv_env_to_task(&envp, envc, task);

	*((uint32 *)(USER_STACK_START - 0)) = (uint32)envp;
	*((uint32 *)(USER_STACK_START - 4)) = (uint32)argv;
	*((uint32 *)(USER_STACK_START - 8)) = (uint32)argc;

	// Update the task's name
	strlcpy((char *)task->name, argv[0], TASK_NAME_LEN);

	if (old_dir != kernel_directory) {
		// execve, stay with the new dir
	}
	else
		switch_page_directory(old_dir);

#if ELF_DEBUG

	printk("File has %u program headers (each %u bytes), %u section headers (each %u bytes)\n",
		   header->e_phnum, header->e_phentsize, header->e_shnum, header->e_shentsize);

	printk("Program Header:\n");
	for (int i=0; i < header->e_phnum; i++) {
		Elf32_Phdr *phdr = (Elf32_Phdr *)(data + header->e_phoff + header->e_phentsize * i);

		if (phdr->p_type == PT_LOAD) {
			printk("LOAD  offset 0x%08x vaddr 0x%08x alignment %u bytes\n", phdr->p_offset, phdr->p_vaddr, phdr->p_align);
			unsigned int f = phdr->p_flags;
			printk("      filesz 0x%08x memsz 0x%08x flags %c%c%c\n", phdr->p_filesz, phdr->p_memsz, 
					(f & PF_R ? 'r' : '-'), (f & PF_W ? 'w' : '-'), (f & PF_X ? 'x' : '-'));
		}
		else {
			printk("unsupported program header (#%u), skipping\n", i);
		}
	}

	// Find the string table
	assert(header->e_shoff != 0); // we need a section header
	Elf32_Shdr *string_table_hdr = (Elf32_Shdr *)(data + header->e_shoff + header->e_shentsize * header->e_shstrndx);
	char *string_table = (char *)(data + string_table_hdr->sh_offset);

	printk("Sections:\n");
	printk("Idx         Name Size     VMA      LMA      File off Align\n");
	for (int i=1; i < header->e_shnum; i++) { // skip #0, which is always empty
		Elf32_Shdr *shdr = (Elf32_Shdr *)(data + header->e_shoff + header->e_shentsize * i);

		char *name = (char *)&string_table[shdr->sh_name];

		printk("%03d %12s %08x %08x %08x %08x %u\n", i, name, shdr->sh_size, shdr->sh_addr, shdr->sh_addr /* TODO: LMA */, shdr->sh_offset, shdr->sh_addralign);
		unsigned int f = shdr->sh_flags;
		printk("                 ");
		if (shdr->sh_type != SHT_NOBITS)
			printk("CONTENTS, ");
		if ((f & SHF_ALLOC))
			printk("ALLOC, ");
		if ((f & SHF_WRITE) == 0)
			printk("READONLY, ");
		if ((f & SHF_EXECINSTR))
			printk("CODE\n");
		else
			printk("DATA\n");
	}
#endif // ELF_DEBUG

	// Try to find symbols, so we can get nice backtrace displays
	Elf32_Sym *symhdr = NULL;
	uint32 num_syms = 0;
	const char *sym_string_table = NULL;
	uint32 string_table_size = 0;

	for (uint32 i=1; i < header->e_shnum; i++) { // skip #0, which is always empty
		Elf32_Shdr *shdr = (Elf32_Shdr *)((uint32)data + header->e_shoff + (header->e_shentsize * i));

		if (shdr->sh_type == SHT_SYMTAB) {
			symhdr = (Elf32_Sym *)(data + shdr->sh_offset);
			num_syms = shdr->sh_size / shdr->sh_entsize;
			Elf32_Shdr *string_table_hdr = (Elf32_Shdr *)((uint32)data + header->e_shoff + shdr->sh_link * header->e_shentsize);
			string_table_size = string_table_hdr->sh_size;
			sym_string_table = (char *)(data + string_table_hdr->sh_offset);
			break;
		}
	}

	// Load symbols for this file, so that we can display them in backtraces
	if (!symhdr || !sym_string_table || num_syms < 1) {
		printk("Warning: failed to load symbols for %s\n", path);
	}
	else {
		// Clone the string table. Because load_symbols doesn't strdup() names
		// for performance reasons, we need the string table to keep existing
		// for as long as the task lives.
		char *old_table = task->symbol_string_table;
		task->symbol_string_table = kmalloc(string_table_size);
		task->symbol_string_table_size = string_table_size;
		memcpy(task->symbol_string_table, sym_string_table, string_table_size);

		if (load_symbols(symhdr, task->symbol_string_table, &task->symbols, num_syms) != 0) {
			printk("Warning: failed to load symbols for %s\n", path);
		}
		else if (old_table) {
			// execve, so free the old one, or it'll leak
			kfree(old_table);
		}
	}

	// If we're still here: set the program entry point
	// (This updates the value on the stack in task.c)
	task->new_entry = (uint32)header->e_entry;
	set_entry_point((task_t *)task, task->new_entry);

	retval = 0;
	/* fall through on success */

err:

	kfree(data);
	assert(retval <= 0);
	return retval;
}
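
elf_load_int open-codes round-up-to-page twice (check alignment, mask, add PAGE_SIZE). The same result in one branch-free expression, assuming a power-of-two PAGE_SIZE; the helper name is hypothetical:

/* Round x up to the next page boundary; a no-op when x is already
 * aligned, matching the if (!IS_PAGE_ALIGNED(...)) idiom above. */
#define PAGE_ALIGN_UP(x) (((x) + PAGE_SIZE - 1) & ~((uint32)PAGE_SIZE - 1))
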
Example #18

void pmm_init(uint32 mbd_mmap_addr, uint32 mbd_mmap_length, uint32 upper_mem) {
	/* upper_mem is provided by GRUB; it's the number of *continuous* kilobytes of memory starting at 1MB (0x100000). */
	mem_end_page = 0x100000 + (uint32)upper_mem*1024;

	/* Ignore the last few bytes of RAM to align, if necessary */
	mem_end_page &= 0xfffff000;

	/*
	 * Check the GRUB memory map a first pass, to see if there are higher addresses
	 * than mem_end_page.
	 * This happens for me in VMware Fusion, but not in QEMU. With 256 MB RAM, QEMU
	 * gives two regions: a small one at below 1 MB, and then 0x100000 - 0x0FEF0000.
	 * VMware Fusion with 256 MB gives an area below 1 MB, 0x100000 - 0x0FEF0000,
	 * but ALSO 0x0FF00000 to 0x10000000.
	 *
	 * Because mem_end_page = 0x0FEF0000 (as it's based on the **continuous** size),
	 * nframes is allocated too small, and we get a buffer overflow (or, rather,
	 * the assertions that prevent that fail, and we get a kernel panic).
	 *
	 * To solve this, we first check how high the largest physical address is, and
	 * then allocate based on that. Finally, we make a second pass through the map
	 * to actually set things up. This pass is only to find out how many frames
	 * there will be.
	 */
	if (mbd_mmap_addr != 0 && mbd_mmap_length != 0) {
		// We got a memory map from GRUB

		for (memory_map_t *memmap = (memory_map_t *)mbd_mmap_addr; (uint32)memmap < mbd_mmap_addr + mbd_mmap_length; memmap++) {
			if (memmap->type != 1) {
				continue;
			}

			if (memmap->base_addr_high != 0) {
				continue;
			}

			/* Page align addresses etc. */
			uint32 addr_lo = memmap->base_addr_low;
			if (addr_lo < PAGE_SIZE) // ignore the first page
				addr_lo = PAGE_SIZE;
			if (addr_lo & 0xfff) {
				addr_lo &= 0xfffff000;
				addr_lo += PAGE_SIZE;
			}

			uint32 addr_hi = addr_lo + (memmap->length_low);
			if (addr_hi & 0xfff)
				addr_hi &= 0xfffff000;

			if (addr_hi > mem_end_page) {
				mem_end_page = addr_hi;
			}
		}
	}

	/* The size of the bitmap is one bit per page */
	nframes = mem_end_page / PAGE_SIZE;

	/* allocate and initialize the bitmap */
	used_frames = (uint32 *)kmalloc((nframes / 32 + 1) * sizeof(uint32));

	// Set all frames to used, and clear the free areas below
	// (Reserved areas are set to "used", and never cleared, so they are always left alone.)
	memset(used_frames, 0xff, (nframes / 32 + 1) * sizeof(uint32));

	last_allocated_frame = 0xffffffff; // we can't use 0 since that's a valid frame

	INTERRUPT_LOCK;

	/*
	 * Utilize the GRUB memory map, if we got one.
	 * All frames are set to used (above), so that reserved areas
	 * are never used.
	 * Then, we loop through the frames in the free areas, and
	 * set them to free, so that they can be allocated by pmm_alloc*.
	 * We only use full pages/frames, so if there's memory available
	 * from 0x500 to 0x9f400, we use the range [0x1000, 0x9f000) and
	 * ignore the rest.
	 */
	if (mbd_mmap_addr != 0 && mbd_mmap_length != 0) {
		// We got a memory map from GRUB

		for (memory_map_t *memmap = (memory_map_t *)mbd_mmap_addr; (uint32)memmap < mbd_mmap_addr + mbd_mmap_length; memmap++) {
			if (memmap->type != 1) {
				// type == 1 means this area is free; all other types are reserved
				// and not available for use
				continue;
			}
#if 0
			printk("entry 0x%p: base 0x%p%p length 0x%p%p (free)\n", memmap,
				memmap->base_addr_high,
				memmap->base_addr_low,
				memmap->length_high,
				memmap->length_low);
#endif

			if (memmap->base_addr_high != 0) {
				printk("Warning: ignoring available RAM area above 4 GB\n");
				continue;
			}
			if (memmap->length_high != 0) {
				printk("Warning: ignoring part of available RAM (length > 4 GB)\n");
				// no continue, let's use the low 32 bits
			}

			/* Page align addresses etc. */
			uint32 addr_lo = memmap->base_addr_low;
			if (addr_lo < PAGE_SIZE) // ignore the first page
				addr_lo = PAGE_SIZE;
			if (addr_lo & 0xfff) {
				addr_lo &= 0xfffff000;
				addr_lo += PAGE_SIZE;
			}

			uint32 addr_hi = addr_lo + (memmap->length_low);

			if (memmap->base_addr_low < PAGE_SIZE) {
				// We adjusted the start of addr_lo above, without adjusting the length.
				// Since changing memmap-> seems like a bad idea, adjust addr_hi here instead.
				addr_hi -= PAGE_SIZE;
			}

			if (addr_hi & 0xfff)
				addr_hi &= 0xfffff000;

			if (addr_lo >= addr_hi) {
				// TODO: is this logic correct?
				// This is probably very unlikely, but not impossible; we used up this area in alignment
				continue;
			}

			// Make sure the alignment worked and won't cause us to
			// access memory outside the area
			assert(IS_PAGE_ALIGNED(addr_lo));
			assert(IS_PAGE_ALIGNED(addr_hi));
			assert(addr_lo >= memmap->base_addr_low);

			// Clear the addresses in this area
			uint32 addr;
			for (addr = addr_lo; addr < addr_hi; addr += PAGE_SIZE) {
				assert(addr < memmap->base_addr_low + memmap->length_low);
				_pmm_clear_frame(addr);
			}
		}
	}
	else {
		printk("Warning: no GRUB memory map found; ignoring/wasting all RAM below 1 MB\n");
		for (uint32 addr = 0x100000; addr < mem_end_page; addr += PAGE_SIZE) {
			// I would optimize this (memset() most of it), but even with 4 GB RAM,
			// QEMU does this in a lot less than a second... real computers are likely
			// faster.
			_pmm_clear_frame(addr);
		}
	}

	INTERRUPT_UNLOCK;
}
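
pmm_init calls _pmm_clear_frame, which is not shown. A plausible reconstruction of the bitmap math, inferred from the allocation above (one bit per PAGE_SIZE frame, packed into uint32 words); treat it as a sketch, not the project's actual code:

static void _pmm_clear_frame(uint32 addr) {
	/* nframes / 32 + 1 words were allocated, so frame / 32 stays in
	 * bounds for any page-aligned addr below mem_end_page. */
	uint32 frame = addr / PAGE_SIZE;
	used_frames[frame / 32] &= ~(1u << (frame % 32));
}
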
Example #19
File: mmu.c Project: chenyuwen/lk

int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));

    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);

    int unmapped = 0;
    while (count > 0) {
        uint l1_index = vaddr / SECTION_SIZE;
        uint32_t tt_entry = aspace->tt_virt[l1_index];

        switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
            case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                /* this top level page is not mapped, move on to the next one */
                uint page_cnt = MIN((SECTION_SIZE - (vaddr % SECTION_SIZE)) / PAGE_SIZE, count);
                vaddr += page_cnt * PAGE_SIZE;
                count -= page_cnt;
                break;
            }
            case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) {
                    /* we're asked to remove at least all of this section, so just zero it out */
                    // XXX test for supersection
                    arm_mmu_unmap_section(aspace, vaddr);

                    vaddr += SECTION_SIZE;
                    count -= SECTION_SIZE / PAGE_SIZE;
                    unmapped += SECTION_SIZE / PAGE_SIZE;
                } else {
                    // XXX handle unmapping just part of a section
                    // will need to convert to a L2 table and then unmap the parts we are asked to
                    PANIC_UNIMPLEMENTED;
                }
                break;
            case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                uint page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                uint page_cnt = MIN((SECTION_SIZE / PAGE_SIZE) - page_idx, count);

                /* unmap page run */
                for (uint i = 0; i < page_cnt; i++) {
                    l2_table[page_idx++] = 0;
                }
                DSB;

                /* invalidate tlb */
                for (uint i = 0; i < page_cnt; i++) {
                    arm_invalidate_tlb_mva_no_barrier(vaddr);
                    vaddr += PAGE_SIZE;
                }
                count -= page_cnt;
                unmapped += page_cnt;

                /*
                 * Check if all pages related to this l1 entry are deallocated.
                 * We only need to check pages that we did not clear above starting
                 * from page_idx and wrapped around SECTION.
                 */
                page_cnt = (SECTION_SIZE / PAGE_SIZE) - page_cnt;
                while (page_cnt) {
                    if (page_idx == (SECTION_SIZE / PAGE_SIZE))
                        page_idx = 0;
                    if (l2_table[page_idx++])
                        break;
                    page_cnt--;
                }
                if (!page_cnt) {
                    /* we can kill l1 entry */
                    arm_mmu_unmap_l1_entry(aspace->tt_virt, l1_index);

                    /* try to free l2 page itself */
                    put_l2_table(aspace, l1_index, MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                }
                break;
            }

            default:
                // XXX not implemented supersections or L2 tables
                PANIC_UNIMPLEMENTED;
        }
    }
    arm_after_invalidate_tlb_barrier();
    return unmapped;
}
Example #20

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
	size_t granularity = mm->granularity;

	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
		(granularity != XLAT_BLOCK_SIZE(2U)) &&
		(granularity != XLAT_BLOCK_SIZE(3U))) {
		return -EINVAL;
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va = mm_cursor->base_va
							+ mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||
			((mm_cursor->base_va >= base_va) &&
						(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
							(base_va - base_pa))
				return -EPERM;

			if ((base_va == mm_cursor->base_va) &&
						(size == mm_cursor->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_cursor_end_pa =
				     mm_cursor->base_pa + mm_cursor->size - 1U;

			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			bool separated_va = (end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!separated_va || !separated_pa)
				return -EPERM;
		}
	}

	return 0;
}
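
The separation test at the end of Example #20 is the standard disjointness check for closed intervals, isolated here for clarity (a sketch, not part of the original xlat code):

#include <stdbool.h>

/* Inclusive ranges [a_lo, a_hi] and [b_lo, b_hi] are disjoint exactly
 * when one ends before the other begins; partial overlaps fail both
 * separated_pa and separated_va in Example #20 and are rejected. */
static bool ranges_separated(unsigned long long a_lo, unsigned long long a_hi,
			     unsigned long long b_lo, unsigned long long b_hi)
{
	return (a_hi < b_lo) || (a_lo > b_hi);
}
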
Example #21

/*
 * rpmem_fip_init_lanes_apm -- (internal) initialize lanes for APM
 */
static int
rpmem_fip_init_lanes_apm(struct rpmem_fip *fip)
{
	ASSERTne(Pagesize, 0);
	int ret;

	/* allocate APM lanes */
	fip->lanes.apm = calloc(1, fip->nlanes * sizeof(*fip->lanes.apm));
	if (!fip->lanes.apm) {
		RPMEM_LOG(ERR, "!allocating APM lanes");
		goto err_malloc_lanes;
	}

	ASSERT(IS_PAGE_ALIGNED(RPMEM_RAW_BUFF_SIZE));
	errno = posix_memalign((void **)&fip->raw_buff, Pagesize,
			RPMEM_RAW_BUFF_SIZE);
	if (errno) {
		RPMEM_LOG(ERR, "!allocating APM RAW buffer");
		goto err_malloc_raw;
	}

	/* register read-after-write buffer */
	ret = fi_mr_reg(fip->domain, fip->raw_buff, RPMEM_RAW_BUFF_SIZE,
			FI_REMOTE_WRITE, 0, 0, 0, &fip->raw_mr, NULL);
	if (ret) {
		RPMEM_FI_ERR(ret, "registering APM read buffer");
		goto err_fi_raw_mr;
	}

	/* get read-after-write buffer local descriptor */
	fip->raw_mr_desc = fi_mr_desc(fip->raw_mr);

	/*
	 * Initialize all required structures for:
	 * WRITE and READ operations.
	 *
	 * If the completion is required the FI_COMPLETION flag and
	 * appropriate context should be used.
	 *
	 * In APM only the READ completion is required.
	 * The context is a lane structure.
	 */
	unsigned i;
	for (i = 0; i < fip->nlanes; i++) {
		ret = rpmem_fip_lane_init(&fip->lanes.apm[i].lane);
		if (ret)
			goto err_lane_init;

		/* WRITE */
		rpmem_fip_rma_init(&fip->lanes.apm[i].write,
				fip->mr_desc, 0,
				fip->rkey,
				&fip->lanes.apm[i],
				0);

		/* READ */
		rpmem_fip_rma_init(&fip->lanes.apm[i].read,
				fip->raw_mr_desc, 0,
				fip->rkey,
				&fip->lanes.apm[i],
				FI_COMPLETION);
	}

	return 0;
err_lane_init:
	for (unsigned j = 0; j < i; j++)
		rpmem_fip_lane_fini(&fip->lanes.apm[i].lane);
err_fi_raw_mr:
	free(fip->raw_buff);
err_malloc_raw:
	free(fip->lanes.apm);
err_malloc_lanes:
	return -1;
}
Example #22

static void flash_bank_entry(void)
{
    bl_info_bank_t* p_bank_entry = mp_bank_entry; /* make local copy to avoid race conditions */
    if (p_bank_entry == NULL)
    {
        return;
    }

    bl_info_entry_t bank_entry_replacement;
    memcpy(&bank_entry_replacement, p_bank_entry, sizeof(bl_info_bank_t));
    switch (p_bank_entry->state)
    {
        case BL_INFO_BANK_STATE_IDLE:
            {
                m_waiting_for_idle = true;
                bank_entry_replacement.bank.state = BL_INFO_BANK_STATE_FLASH_FW;
                bootloader_info_entry_overwrite((bl_info_type_t) (BL_INFO_TYPE_BANK_BASE + m_dfu_type), &bank_entry_replacement);

                /* Wait for this to take effect before moving on, as the
                   potential mbr commands in the flash_fw state may trigger
                   sudden reboots. */
                return;
            }

        case BL_INFO_BANK_STATE_FLASH_FW:
            switch (m_dfu_type)
            {
                case DFU_TYPE_BOOTLOADER:
                    /* Check to see if the bank transfer has been executed */
                    if (memcmp(p_bank_entry->p_bank_addr,
                                (uint32_t*) bootloader_info_entry_get(BL_INFO_TYPE_SEGMENT_BL)->segment.start,
                                p_bank_entry->length) != 0)
                    {
                        /* move the bank with MBR. BOOTLOADERADDR() must
                           have been set. */
                        sd_mbr_command_t sd_mbr_cmd;

                        sd_mbr_cmd.command               = SD_MBR_COMMAND_COPY_BL;
                        sd_mbr_cmd.params.copy_bl.bl_src = p_bank_entry->p_bank_addr;
                        sd_mbr_cmd.params.copy_bl.bl_len = p_bank_entry->length / sizeof(uint32_t);
                        APP_ERROR_CHECK(sd_mbr_command(&sd_mbr_cmd));
                        return; /* Can't be reached, only here for readability. */
                    }
                    else
                    {
                        bank_entry_replacement.bank.state = BL_INFO_BANK_STATE_FLASH_META;
                        bootloader_info_entry_overwrite(BL_INFO_TYPE_BANK_BL, &bank_entry_replacement);
                    }
                    break;

                case DFU_TYPE_SD:
                    /* Check to see if the bank transfer has been executed */
                    if (memcmp(p_bank_entry->p_bank_addr,
                                (uint32_t*) bootloader_info_entry_get(BL_INFO_TYPE_SEGMENT_SD)->segment.start,
                                p_bank_entry->length) != 0)
                    {
                        /* move the bank with MBR. */
                        sd_mbr_command_t sd_mbr_cmd;

                        sd_mbr_cmd.command               = SD_MBR_COMMAND_COPY_SD;
                        sd_mbr_cmd.params.copy_sd.src    = p_bank_entry->p_bank_addr;
                        sd_mbr_cmd.params.copy_sd.len    = p_bank_entry->length / sizeof(uint32_t);
                        sd_mbr_cmd.params.copy_sd.dst    = (uint32_t*) 0x1000;
                        APP_ERROR_CHECK(sd_mbr_command(&sd_mbr_cmd));
                        return; /* Can't be reached, only here for readability. */
                    }
                    else
                    {
                        bank_entry_replacement.bank.state = BL_INFO_BANK_STATE_FLASH_META;
                        bootloader_info_entry_overwrite((bl_info_type_t) (BL_INFO_TYPE_BANK_BASE + m_dfu_type), &bank_entry_replacement);
                    }
                    break;

                case DFU_TYPE_APP:
                    /* This nukes the call stack and any flash-callbacks on the
                       app side. If we're in the application, we have to jump
                       to bootloader. */
                    if (bootloader_is_in_application())
                    {
                        /* All paths leading to this call warns about this
                           reset. We'll come back to finalize the transfer
                           after the reset. */
                        __LOG("IN APP MODE. RESET!\n");

#if 1 //def SOFTDEVICE_PRESENT
                        sd_power_reset_reason_clr(0x0F000F);

#if NORDIC_SDK_VERSION >= 11
                        sd_power_gpregret_set(0, RBC_MESH_GPREGRET_CODE_GO_TO_APP);
#else
                        sd_power_gpregret_set(RBC_MESH_GPREGRET_CODE_GO_TO_APP);
#endif
                        sd_nvic_SystemReset();
#else
                        NRF_POWER->RESETREAS = 0x0F000F; /* erase reset-reason to avoid wrongful state-readout on reboot */
                        NRF_POWER->GPREGRET = RBC_MESH_GPREGRET_CODE_GO_TO_APP;
                        NVIC_SystemReset();
#endif
                    }
                    else
                    {
                        /* Erase, Flash the FW, flash FW flag, flash the signature, erase the bank entry. */
                        bl_info_entry_t* p_app_entry = bootloader_info_entry_get(BL_INFO_TYPE_SEGMENT_APP);

                        APP_ERROR_CHECK_BOOL(p_app_entry != NULL);
                        APP_ERROR_CHECK_BOOL(IS_PAGE_ALIGNED(p_app_entry->segment.start));

                        /* Erase existing FW */
                        bl_evt_t flash_evt;
                        flash_evt.type = BL_EVT_TYPE_FLASH_ERASE;
                        flash_evt.params.flash.erase.start_addr = p_app_entry->segment.start;
                        flash_evt.params.flash.erase.length = ((p_bank_entry->length + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)); /* Pad the rest of the page */
                        if (bootloader_evt_send(&flash_evt) != NRF_SUCCESS)
                        {
                            m_waiting_for_idle = true;
                            return;
                        }

                        /* Flash bank */
                        flash_evt.type = BL_EVT_TYPE_FLASH_WRITE;
                        flash_evt.params.flash.write.p_data = (uint8_t*) p_bank_entry->p_bank_addr;
                        flash_evt.params.flash.write.length = p_bank_entry->length;
                        flash_evt.params.flash.write.start_addr = p_app_entry->segment.start;
                        if (bootloader_evt_send(&flash_evt) != NRF_SUCCESS)
                        {
                            m_waiting_for_idle = true;
                            return;
                        }

                        /* Update state */
                        bank_entry_replacement.bank.state = BL_INFO_BANK_STATE_FLASH_META;
                        bootloader_info_entry_overwrite((bl_info_type_t) (BL_INFO_TYPE_BANK_BASE + m_dfu_type), &bank_entry_replacement);
                    }
                    break;

                default:
                    APP_ERROR_CHECK(NRF_ERROR_INVALID_DATA);
                    break;
            }
            /* deliberate fallthrough */
        case BL_INFO_BANK_STATE_FLASH_META:
            {
                bl_info_entry_t fwid_entry;
                bl_info_type_t signature_type;
                bl_info_entry_t flags_entry;
                bl_info_entry_t* p_old_fwid_entry  = bootloader_info_entry_get(BL_INFO_TYPE_VERSION);
                bl_info_entry_t* p_old_flags_entry = bootloader_info_entry_get(BL_INFO_TYPE_FLAGS);
                APP_ERROR_CHECK_BOOL(p_old_fwid_entry);
                APP_ERROR_CHECK_BOOL(p_bank_entry);

                memcpy(&fwid_entry, p_old_fwid_entry, sizeof(bl_info_version_t));
                memcpy(&flags_entry, p_old_flags_entry, sizeof(bl_info_flags_t));
                switch (m_dfu_type)
                {
                    case DFU_TYPE_SD:
                        fwid_entry.version.sd = p_bank_entry->fwid.sd;
                        signature_type = BL_INFO_TYPE_SIGNATURE_SD;
                        flags_entry.flags.sd_intact = true;
                        break;
                    case DFU_TYPE_BOOTLOADER:
                        fwid_entry.version.bootloader.id  = p_bank_entry->fwid.bootloader.id;
                        fwid_entry.version.bootloader.ver = p_bank_entry->fwid.bootloader.ver;
                        signature_type = BL_INFO_TYPE_SIGNATURE_BL;
                        flags_entry.flags.bl_intact = true;
                        break;
                    case DFU_TYPE_APP:
                        fwid_entry.version.app.company_id   = p_bank_entry->fwid.app.company_id;
                        fwid_entry.version.app.app_id       = p_bank_entry->fwid.app.app_id;
                        fwid_entry.version.app.app_version  = p_bank_entry->fwid.app.app_version;
                        signature_type = BL_INFO_TYPE_SIGNATURE_APP;
                        flags_entry.flags.app_intact = true;
                        break;
                    default:
                        APP_ERROR_CHECK(NRF_ERROR_INVALID_DATA);
                        return;
                }
                if (!bootloader_info_entry_put(BL_INFO_TYPE_VERSION,
                            &fwid_entry,
                            BL_INFO_LEN_FWID))
                {
                    m_waiting_for_idle = true;
                    return;
                }
                if (p_bank_entry->has_signature)
                {
                    if (!bootloader_info_entry_put(signature_type,
                                (bl_info_entry_t*) p_bank_entry->signature,
                                BL_INFO_LEN_SIGNATURE))
                    {
                        m_waiting_for_idle = true;
                        return;
                    }
                }
                if (!bootloader_info_entry_put(BL_INFO_TYPE_FLAGS,
                            &flags_entry,
                            BL_INFO_LEN_FLAGS))
                {
                    m_waiting_for_idle = true;
                    return;
                }

                /* Update state */
                __LOG("Bank: Set state to FLASHED\n");
                bank_entry_replacement.bank.state = BL_INFO_BANK_STATE_FLASHED;
                bootloader_info_entry_overwrite((bl_info_type_t) (BL_INFO_TYPE_BANK_BASE + m_dfu_type), &bank_entry_replacement);

            }
            /* deliberate fallthrough */
        case BL_INFO_BANK_STATE_FLASHED:
            /* We may invalidate the bank entry in the device page now,
               it's all redundant. */
            __LOG("Bank: Invalidate.\n");
            if (bootloader_info_entry_invalidate((bl_info_type_t) (BL_INFO_TYPE_BANK_BASE + m_dfu_type)) == NRF_SUCCESS)
            {
                __LOG("Bank invalidated.\n");
                mp_bank_entry = NULL; /* reset the static bank pointer, as we no longer need it. */
            }
            else
            {
                m_waiting_for_idle = true;
            }
            break;

    }
}
Example #23

void _SE3(uintptr_t xax, uintptr_t xbx,
          uintptr_t xcx, uintptr_t xdx,
          uintptr_t xsi, uintptr_t xdi)
{
    UNUSED(xdx);

    switch (xax)
    {
    case SE_EENTER:
        uintptr_t     xip;
        void        * enclave_base_addr;
        se_pt_regs_t* p_pt_regs;
        tcs_t*        tcs;
        tcs_sim_t*    tcs_sim;
        ssa_gpr_t*    p_ssa_gpr;
        secs_t*       secs;
        CEnclaveMngr* mngr;
        CEnclaveSim*    ce;

        // xbx contains the address of a TCS
        tcs = reinterpret_cast<tcs_t*>(xbx);

        // Is TCS pointer page-aligned?
        GP_ON_EENTER(!IS_PAGE_ALIGNED(tcs));

        mngr = CEnclaveMngr::get_instance();
        assert(mngr != NULL);

        // Is it really a TCS?
        ce = mngr->get_enclave(tcs);
        GP_ON_EENTER(ce == NULL);
        GP_ON_EENTER(!ce->is_tcs_page(tcs));

        // Check the EntryReason
        tcs_sim = reinterpret_cast<tcs_sim_t *>(tcs->reserved);
        GP_ON_EENTER(tcs_sim->tcs_state != TCS_STATE_INACTIVE);
        GP_ON_EENTER(tcs->cssa >= tcs->nssa);

        secs = ce->get_secs();
        enclave_base_addr = secs->base;

        p_ssa_gpr = reinterpret_cast<ssa_gpr_t*>(reinterpret_cast<uintptr_t>(enclave_base_addr) + static_cast<size_t>(tcs->ossa)
                + secs->ssa_frame_size * SE_PAGE_SIZE
                - sizeof(ssa_gpr_t));

        tcs_sim->saved_aep = xcx;

        p_pt_regs = reinterpret_cast<se_pt_regs_t*>(get_bp());
        p_ssa_gpr->REG(bp_u) = p_pt_regs->xbp;

        p_ssa_gpr->REG(sp_u) = reinterpret_cast<uintptr_t>(p_pt_regs + 1);
        xcx = p_pt_regs->xip;

        xip = reinterpret_cast<uintptr_t>(enclave_base_addr);
        GP_ON_EENTER(xip == 0);

        //set the _tls_array to point to the self_addr of TLS section inside the enclave
        GP_ON_EENTER(td_mngr_set_td(enclave_base_addr, tcs) == false);
 
        // Destination depends on STATE
        xip += (uintptr_t)tcs->oentry;
        tcs_sim->tcs_state = TCS_STATE_ACTIVE;

        // Link the TCS to the thread
        GP_ON_EENTER((secs->attributes.flags & SGX_FLAGS_INITTED) == 0);

        // Replace the return address on the stack with the enclave entry,
        // so that when we return from this function, we'll enter the enclave.
        enclu_regs_t regs;
        regs.xax = tcs->cssa;
        regs.xbx = reinterpret_cast<uintptr_t>(tcs);
        regs.xcx = xcx;
        regs.xdx = 0;
        regs.xsi = xsi;
        regs.xdi = xdi;
        regs.xbp = p_ssa_gpr->REG(bp_u);
        regs.xsp = p_ssa_gpr->REG(sp_u);
        regs.xip = xip;

        load_regs(&regs);

        // Returning from this function enters the enclave
        return;
    default:
        // There's only 1 ring 3 instruction outside the enclave: EENTER.
        GP();
    }
}