Example #1
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace.  It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	char *pathname;
	struct _buf *file;
	uint64_t fsize;

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);

	file = kobj_open_file(pathname);

	kmem_free(pathname, MAXPATHLEN);

	if (file == (struct _buf *)-1)
		return;

	if (kobj_get_filesize(file, &fsize) != 0)
		goto out;

	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, fsize, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		(void) spa_add(nvpair_name(nvpair), child, NULL);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, fsize);

	kobj_close_file(file);
}
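
The open, stat, allocate, read, close sequence above is the pattern shared by every example on this page. Below is a minimal sketch of that pattern on its own, assuming the illumos/SPL kobj interface used above (kobj_open_file(), kobj_get_filesize(), kobj_read_file(), kobj_close_file()) and kmem_alloc()/kmem_free() for the buffer; the helper name read_file_contents() and its error codes are illustrative and not part of any of these sources:

/*
 * Sketch only: read an entire file into a kmem-allocated buffer using the
 * kobj interface shown above.  On success, *bufp and *sizep describe the
 * buffer, which the caller releases with kmem_free(buf, size).
 */
static int
read_file_contents(const char *path, void **bufp, uint64_t *sizep)
{
	struct _buf *file;
	uint64_t fsize;
	void *buf;

	/* kobj_open_file() returns (struct _buf *)-1 on failure. */
	file = kobj_open_file(path);
	if (file == (struct _buf *)-1)
		return (ENOENT);

	if (kobj_get_filesize(file, &fsize) != 0) {
		kobj_close_file(file);
		return (EIO);
	}

	buf = kmem_alloc(fsize, KM_SLEEP);

	/* kobj_read_file() returns the byte count read, or -1 on error. */
	if (kobj_read_file(file, buf, fsize, 0) < 0) {
		kmem_free(buf, fsize);
		kobj_close_file(file);
		return (EIO);
	}

	kobj_close_file(file);
	*bufp = buf;
	*sizep = fsize;
	return (0);
}

The caller owns the returned buffer and frees it with kmem_free(buf, size), mirroring the cleanup at the out: label in spa_config_load() above.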
Example #2
static int
splat_kobj_test2(struct file *file, void *arg)
{
	struct _buf *f;
	char *buf;
	uint64_t size;
	int rc;

	f = kobj_open_file(SPLAT_KOBJ_TEST_FILE);
	if (f == (struct _buf *)-1) {
		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed to open "
			     "test file: %s\n", SPLAT_KOBJ_TEST_FILE);
		return -ENOENT;
	}

	rc = kobj_get_filesize(f, &size);
	if (rc) {
		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed stat of "
			     "test file: %s (%d)\n", SPLAT_KOBJ_TEST_FILE, rc);
		goto out;
	}

	/* Allocate one extra byte so the zero-filled buffer below is always
	 * NUL-terminated for the strlen() based check that follows. */
	buf = kmalloc(size + 1, GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed to alloc "
			     "%lld bytes for tmp buffer (%d)\n",
			     (long long)size, rc);
		goto out;
	}

	memset(buf, 0, size + 1);
	rc = kobj_read_file(f, buf, size, 0);
	if (rc < 0) {
		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed read of "
			     "test file: %s (%d)\n", SPLAT_KOBJ_TEST_FILE, rc);
		goto out2;
	}

	/* Validate we read as many bytes as expected based on the stat.  This
	 * isn't a perfect test since we didn't create the file; however, it is
	 * pretty unlikely there are garbage characters in your /etc/fstab. */
	if (size != (uint64_t)strlen(buf)) {
		rc = -EFBIG;
		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Stat'ed size "
			     "(%lld) does not match number of bytes read "
			     "(%lld)\n", (long long)size,
			     (long long)strlen(buf));
		goto out2;
	}

	rc = 0;
	splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "\n%s\n", buf);
	splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Successfully stat'ed "
		     "and read expected number of bytes (%lld) from test "
		     "file: %s\n", (long long)size, SPLAT_KOBJ_TEST_FILE);
out2:
	kfree(buf);
out:
	kobj_close_file(f);

	return rc;
} /* splat_kobj_test2() */
Example #3
/*
 * This function performs the following tasks:
 * - Read the sizes of the new kernel and boot archive.
 * - Allocate memory for the new kernel and boot archive.
 * - Allocate memory for page tables necessary for mapping the memory
 *   allocated for the files.
 * - Read the new kernel and boot archive into memory.
 * - Map in the fast reboot switcher.
 * - Load the fast reboot switcher to FASTBOOT_SWTCH_PA.
 * - Build the new multiboot_info structure.
 * - Build page tables for the low 1G of physical memory.
 * - Mark the data structure as valid if all steps have succeeded.
 */
void
fastboot_load_kernel(char *mdep)
{
	void		*buf = NULL;
	int		i;
	fastboot_file_t	*fb;
	uint32_t	dboot_start_offset;
	char		kern_bootpath[OBP_MAXPATHLEN];
	extern uintptr_t postbootkernelbase;
	uintptr_t	saved_kernelbase;
	int		bootpath_len = 0;
	int		is_failsafe = 0;
	int		is_retry = 0;
	uint64_t	end_addr;

	if (!fastreboot_capable)
		return;

	if (newkernel.fi_valid)
		fastboot_free_newkernel(&newkernel);

	saved_kernelbase = postbootkernelbase;

	postbootkernelbase = 0;

	/*
	 * Initialize various HAT related fields in the data structure
	 */
	fastboot_init_fields(&newkernel);

	bzero(kern_bootpath, OBP_MAXPATHLEN);

	/*
	 * Process the boot argument
	 */
	bzero(fastboot_args, OBP_MAXPATHLEN);
	fastboot_parse_mdep(mdep, kern_bootpath, &bootpath_len, fastboot_args);

	/*
	 * Make sure we get the null character
	 */
	bcopy(kern_bootpath, fastboot_filename[FASTBOOT_NAME_UNIX],
	    bootpath_len);
	bcopy(kern_bootfile,
	    &fastboot_filename[FASTBOOT_NAME_UNIX][bootpath_len],
	    strlen(kern_bootfile) + 1);

	bcopy(kern_bootpath, fastboot_filename[FASTBOOT_NAME_BOOTARCHIVE],
	    bootpath_len);

	if (bcmp(kern_bootfile, FAILSAFE_BOOTFILE32,
	    (sizeof (FAILSAFE_BOOTFILE32) - 1)) == 0 ||
	    bcmp(kern_bootfile, FAILSAFE_BOOTFILE64,
	    (sizeof (FAILSAFE_BOOTFILE64) - 1)) == 0) {
		is_failsafe = 1;
	}

load_kernel_retry:
	/*
	 * Read in unix and boot_archive
	 */
	end_addr = DBOOT_ENTRY_ADDRESS;
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		struct _buf	*file;
		uintptr_t	va;
		uint64_t	fsize;
		size_t		fsize_roundup, pt_size;
		int		page_index;
		uintptr_t	offset;
		ddi_dma_attr_t dma_attr = fastboot_dma_attr;


		dprintf("fastboot_filename[%d] = %s\n",
		    i, fastboot_filename[i]);

		if ((file = kobj_open_file(fastboot_filename[i])) ==
		    (struct _buf *)-1) {
			cmn_err(CE_NOTE, "!Fastboot: Couldn't open %s",
			    fastboot_filename[i]);
			goto err_out;
		}

		if (kobj_get_filesize(file, &fsize) != 0) {
			cmn_err(CE_NOTE,
			    "!Fastboot: Couldn't get filesize for %s",
			    fastboot_filename[i]);
			goto err_out;
		}

		fsize_roundup = P2ROUNDUP_TYPED(fsize, PAGESIZE, size_t);

		/*
		 * Where the files end in physical memory after being
		 * relocated by the fast boot switcher.
		 */
		end_addr += fsize_roundup;
		if (end_addr > fastboot_below_1G_dma_attr.dma_attr_addr_hi) {
			cmn_err(CE_NOTE, "!Fastboot: boot archive is too big");
			goto err_out;
		}

		/*
		 * Adjust dma_attr_addr_lo so that the new kernel and boot
		 * archive will not be overridden during relocation.
		 */
		if (end_addr > fastboot_dma_attr.dma_attr_addr_lo ||
		    end_addr > fastboot_below_1G_dma_attr.dma_attr_addr_lo) {

			if (is_retry) {
				/*
				 * If we have already tried and didn't succeed,
				 * just give up.
				 */
				cmn_err(CE_NOTE,
				    "!Fastboot: boot archive is too big");
				goto err_out;
			} else {
				/* Set the flag so we don't keep retrying */
				is_retry++;

				/* Adjust dma_attr_addr_lo */
				fastboot_dma_attr.dma_attr_addr_lo = end_addr;
				fastboot_below_1G_dma_attr.dma_attr_addr_lo =
				    end_addr;

				/*
				 * Free the memory we have already allocated
				 * whose physical addresses might not fit
				 * the new lo and hi constraints.
				 */
				fastboot_free_mem(&newkernel, end_addr);
				goto load_kernel_retry;
			}
		}


		if (!fastboot_contig)
			dma_attr.dma_attr_sgllen = (fsize / PAGESIZE) +
			    (((fsize % PAGESIZE) == 0) ? 0 : 1);

		if ((buf = contig_alloc(fsize, &dma_attr, PAGESIZE, 0))
		    == NULL) {
			cmn_err(CE_NOTE, fastboot_enomem_msg, fsize, "64G");
			goto err_out;
		}

		va = P2ROUNDUP_TYPED((uintptr_t)buf, PAGESIZE, uintptr_t);

		if (kobj_read_file(file, (char *)va, fsize, 0) < 0) {
			cmn_err(CE_NOTE, "!Fastboot: Couldn't read %s",
			    fastboot_filename[i]);
			goto err_out;
		}

		fb = &newkernel.fi_files[i];
		fb->fb_va = va;
		fb->fb_size = fsize;
		fb->fb_sectcnt = 0;

		pt_size = FASTBOOT_PTE_LIST_SIZE(fsize_roundup);

		/*
		 * If we have reserved memory but it is not enough, free it.
		 */
		if (fb->fb_pte_list_size && fb->fb_pte_list_size < pt_size) {
			contig_free((void *)fb->fb_pte_list_va,
			    fb->fb_pte_list_size);
			fb->fb_pte_list_size = 0;
		}

		if (fb->fb_pte_list_size == 0) {
			if ((fb->fb_pte_list_va =
			    (x86pte_t *)contig_alloc(pt_size,
			    &fastboot_below_1G_dma_attr, PAGESIZE, 0))
			    == NULL) {
				cmn_err(CE_NOTE, fastboot_enomem_msg,
				    (uint64_t)pt_size, "1G");
				goto err_out;
			}
			/*
			 * fb_pte_list_size must be set after the allocation
			 * succeeds as it's used to determine how much memory to
			 * free.
			 */
			fb->fb_pte_list_size = pt_size;
		}

		bzero((void *)(fb->fb_pte_list_va), fb->fb_pte_list_size);

		fb->fb_pte_list_pa = mmu_ptob((uint64_t)hat_getpfnum(kas.a_hat,
		    (caddr_t)fb->fb_pte_list_va));

		for (page_index = 0, offset = 0; offset < fb->fb_size;
		    offset += PAGESIZE) {
			uint64_t paddr;

			paddr = mmu_ptob((uint64_t)hat_getpfnum(kas.a_hat,
			    (caddr_t)fb->fb_va + offset));

			ASSERT(paddr >= fastboot_dma_attr.dma_attr_addr_lo);

			/*
			 * Include the pte_bits so we don't have to make
			 * it in assembly.
			 */
			fb->fb_pte_list_va[page_index++] = (x86pte_t)
			    (paddr | pte_bits);
		}

		fb->fb_pte_list_va[page_index] = FASTBOOT_TERMINATE;

		if (i == FASTBOOT_UNIX) {
			Ehdr	*ehdr = (Ehdr *)va;
			int	j;

			/*
			 * Sanity checks:
			 */
			for (j = 0; j < SELFMAG; j++) {
				if (ehdr->e_ident[j] != ELFMAG[j]) {
					cmn_err(CE_NOTE, "!Fastboot: Bad ELF "
					    "signature");
					goto err_out;
				}
			}

			if (ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
			    ehdr->e_ident[EI_DATA] == ELFDATA2LSB &&
			    ehdr->e_machine == EM_386) {

				fb->fb_sectcnt = sizeof (fb->fb_sections) /
				    sizeof (fb->fb_sections[0]);

				if (fastboot_elf32_find_loadables((void *)va,
				    fsize, &fb->fb_sections[0],
				    &fb->fb_sectcnt, &dboot_start_offset) < 0) {
					cmn_err(CE_NOTE, "!Fastboot: ELF32 "
					    "program section failure");
					goto err_out;
				}

				if (fb->fb_sectcnt == 0) {
					cmn_err(CE_NOTE, "!Fastboot: No ELF32 "
					    "program sections found");
					goto err_out;
				}

				if (is_failsafe) {
					/* Failsafe boot_archive */
					bcopy(BOOTARCHIVE32_FAILSAFE,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE32_FAILSAFE));
				} else {
					bcopy(BOOTARCHIVE32,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE32));
				}

			} else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64 &&
			    ehdr->e_ident[EI_DATA] == ELFDATA2LSB &&
			    ehdr->e_machine == EM_AMD64) {

				if (fastboot_elf64_find_dboot_load_offset(
				    (void *)va, fsize, &dboot_start_offset)
				    != 0) {
					cmn_err(CE_NOTE, "!Fastboot: Couldn't "
					    "find ELF64 dboot entry offset");
					goto err_out;
				}

				if (!is_x86_feature(x86_featureset,
				    X86FSET_64) ||
				    !is_x86_feature(x86_featureset,
				    X86FSET_PAE)) {
					cmn_err(CE_NOTE, "Fastboot: Cannot "
					    "reboot to %s: "
					    "not a 64-bit capable system",
					    kern_bootfile);
					goto err_out;
				}

				if (is_failsafe) {
					/* Failsafe boot_archive */
					bcopy(BOOTARCHIVE64_FAILSAFE,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE64_FAILSAFE));
				} else {
					bcopy(BOOTARCHIVE64,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE64));
				}
			} else {
				cmn_err(CE_NOTE, "!Fastboot: Unknown ELF type");
				goto err_out;
			}

			fb->fb_dest_pa = DBOOT_ENTRY_ADDRESS -
			    dboot_start_offset;

			fb->fb_next_pa = DBOOT_ENTRY_ADDRESS + fsize_roundup;
		} else {
			fb->fb_dest_pa = newkernel.fi_files[i - 1].fb_next_pa;
			fb->fb_next_pa = fb->fb_dest_pa + fsize_roundup;
		}

		kobj_close_file(file);

	}

	/*
	 * Add the function that will switch us to 32-bit protected mode
	 */
	fb = &newkernel.fi_files[FASTBOOT_SWTCH];
	fb->fb_va = fb->fb_dest_pa = FASTBOOT_SWTCH_PA;
	fb->fb_size = MMU_PAGESIZE;

	hat_devload(kas.a_hat, (caddr_t)fb->fb_va,
	    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
	    PROT_READ | PROT_WRITE | PROT_EXEC,
	    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);

	/*
	 * Build the new multiboot_info structure
	 */
	if (fastboot_build_mbi(fastboot_args, &newkernel) != 0) {
		goto err_out;
	}

	/*
	 * Build page table for low 1G physical memory. Use big pages.
	 * Allocate 4 (5 for amd64) pages for the page tables.
	 *    1 page for PML4 (amd64)
	 *    1 page for Page-Directory-Pointer Table
	 *    2 pages for Page Directory
	 *    1 page for Page Table.
	 * The page table entry will be rewritten to map the physical
	 * address as we do the copying.
	 */
	if (newkernel.fi_has_pae) {
#ifdef	__amd64
		size_t size = MMU_PAGESIZE * 5;
#else
		size_t size = MMU_PAGESIZE * 4;
#endif	/* __amd64 */

		if (newkernel.fi_pagetable_size && newkernel.fi_pagetable_size
		    < size) {
			contig_free((void *)newkernel.fi_pagetable_va,
			    newkernel.fi_pagetable_size);
			newkernel.fi_pagetable_size = 0;
		}

		if (newkernel.fi_pagetable_size == 0) {
			if ((newkernel.fi_pagetable_va = (uintptr_t)
			    contig_alloc(size, &fastboot_below_1G_dma_attr,
			    MMU_PAGESIZE, 0)) == NULL) {
				cmn_err(CE_NOTE, fastboot_enomem_msg,
				    (uint64_t)size, "1G");
				goto err_out;
			}
			/*
			 * fi_pagetable_size must be set after the allocation
			 * succeeds as it's used to determine how much memory to
			 * free.
			 */
			newkernel.fi_pagetable_size = size;
		}

		bzero((void *)(newkernel.fi_pagetable_va), size);

		newkernel.fi_pagetable_pa =
		    mmu_ptob((uint64_t)hat_getpfnum(kas.a_hat,
		    (caddr_t)newkernel.fi_pagetable_va));

		newkernel.fi_last_table_pa = newkernel.fi_pagetable_pa +
		    size - MMU_PAGESIZE;

		newkernel.fi_next_table_va = newkernel.fi_pagetable_va +
		    MMU_PAGESIZE;
		newkernel.fi_next_table_pa = newkernel.fi_pagetable_pa +
		    MMU_PAGESIZE;

		fastboot_build_pagetables(&newkernel);
	}


	/* Generate MD5 checksums */
	fastboot_cksum_generate(&newkernel);

	/* Mark it as valid */
	newkernel.fi_valid = 1;
	newkernel.fi_magic = FASTBOOT_MAGIC;

	postbootkernelbase = saved_kernelbase;
	return;

err_out:
	postbootkernelbase = saved_kernelbase;
	newkernel.fi_valid = 0;
	fastboot_free_newkernel(&newkernel);
}
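
fastboot_load_kernel() above rounds every file size up to a page boundary with P2ROUNDUP_TYPED() before computing physical placement (end_addr, fb_next_pa) and the PTE-list size. The following is a minimal sketch of that power-of-two round-up arithmetic as a standalone user-space program; the round_up_p2() helper is a stand-in written for illustration, not the sysmacros.h macro from the source above:

#include <stdint.h>
#include <stdio.h>

/*
 * Round x up to the next multiple of align, where align is a power of two.
 * This mirrors the effect of the P2ROUNDUP_TYPED() usage in the example.
 */
static uint64_t
round_up_p2(uint64_t x, uint64_t align)
{
	return ((x + align - 1) & ~(align - 1));
}

int
main(void)
{
	uint64_t pagesize = 4096;

	/* A 10000-byte file occupies three 4 KB pages: prints 12288. */
	printf("%llu\n", (unsigned long long)round_up_p2(10000, pagesize));
	return (0);
}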
Example #4
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace.  It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
    void *buf = NULL;
    nvlist_t *nvlist, *child;
    nvpair_t *nvpair;
    spa_t *spa;
    char pathname[128];
    struct _buf *file;
    uint64_t fsize;

    /*
     * Open the configuration file.
     */
#ifdef __native_client__
    (void) snprintf(pathname, sizeof (pathname), "%s", spa_config_path);
#else
    (void) snprintf(pathname, sizeof (pathname), "%s%s",
                    (rootdir != NULL) ? "./" : "", spa_config_path);
#endif /* __native_client__ */

    file = kobj_open_file(pathname);
    if (file == (struct _buf *)-1)
        return;

    if (kobj_get_filesize(file, &fsize) != 0)
        goto out;

    buf = kmem_alloc(fsize, KM_SLEEP);

    /*
     * Read the nvlist from the file.
     */
    if (kobj_read_file(file, buf, fsize, 0) < 0)
        goto out;

    /*
     * Unpack the nvlist.
     */
    if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
        goto out;

    /*
     * Iterate over all elements in the nvlist, creating a new spa_t for
     * each one with the specified configuration.
     */
    mutex_enter(&spa_namespace_lock);
    nvpair = NULL;
    while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {

        if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
            continue;

        VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

        if (spa_lookup(nvpair_name(nvpair)) != NULL)
            continue;
        spa = spa_add(nvpair_name(nvpair), NULL);

        /*
         * We blindly duplicate the configuration here.  If it's
         * invalid, we will catch it when the pool is first opened.
         */
        VERIFY(nvlist_dup(child, &spa->spa_config, 0) == 0);
    }
    mutex_exit(&spa_namespace_lock);

    nvlist_free(nvlist);

out:
    if (buf != NULL)
        kmem_free(buf, fsize);

    kobj_close_file(file);
}