/*
 * ACPI table override hook: look for a replacement table file on disk.
 *
 * The override file name is "<acpi_table_path><SIG>_<OEMID>_<OEMTABLEID>.dat"
 * (for example "DSDT_IntelR_AWRDACPI.dat").  If no such file exists, or it
 * cannot be read safely, *NewTable is left NULL and AE_OK is returned so the
 * firmware-provided table is used unchanged.
 *
 * On success *NewTable points to a kmem_alloc'd copy of the file contents;
 * ownership passes to the ACPICA core (never freed here).
 */
ACPI_STATUS
AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
    ACPI_TABLE_HEADER **NewTable)
{
	char signature[5];
	char oemid[7];
	char oemtableid[9];
	struct _buf *file;
	char *buf1, *buf2;
	int count;
	char acpi_table_loc[128];

	/* NUL-terminated copies of the fixed-width header fields */
	acpica_strncpy(signature, ExistingTable->Signature, 4);
	acpica_strncpy(oemid, ExistingTable->OemId, 6);
	acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);

#ifdef DEBUG
	cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
	    " OEM TABLE ID [%s] OEM rev %x",
	    signature, ExistingTable->Revision, oemid, oemtableid,
	    ExistingTable->OemRevision);
#endif

	/*
	 * File name format is "signature_oemid_oemtableid.dat".
	 * Use a bounded snprintf rather than the previous unchecked
	 * strcpy/strcat chain into the fixed 128-byte buffer.
	 */
	(void) snprintf(acpi_table_loc, sizeof (acpi_table_loc),
	    "%s%s_%s_%s.dat", acpi_table_path, signature, oemid, oemtableid);

	file = kobj_open_file(acpi_table_loc);
	if (file == (struct _buf *)-1) {
		*NewTable = NULL;
		return (AE_OK);
	}

	*NewTable = NULL;
	buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
	count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE - 1, 0);
	if (count <= 0) {
		/*
		 * Read error (negative) or empty file: previously a
		 * negative count fell through into kmem_alloc()/memcpy()
		 * with a negative size.  Fall back to the firmware table.
		 */
		cmn_err(CE_WARN, "!acpica: table %s read failed",
		    acpi_table_loc);
	} else if (count >= MAX_DAT_FILE_SIZE - 1) {
		/*
		 * The read filled the buffer, so the file may be truncated;
		 * refuse to use it.  (The old check of
		 * count >= MAX_DAT_FILE_SIZE could never fire, since at
		 * most MAX_DAT_FILE_SIZE-1 bytes are read above.)
		 */
		cmn_err(CE_WARN, "!acpica: table %s file size too big",
		    acpi_table_loc);
	} else {
		buf2 = (char *)kmem_alloc(count, KM_SLEEP);
		(void) memcpy(buf2, buf1, count);
		*NewTable = (ACPI_TABLE_HEADER *)buf2;
		cmn_err(CE_NOTE, "!acpica: replacing table: %s",
		    acpi_table_loc);
	}
	kobj_close_file(file);
	kmem_free(buf1, MAX_DAT_FILE_SIZE);
	return (AE_OK);
}
/* * Checks to see if the /etc/path_to_inst file exists and whether or not * it has the magic string in it. * * Returns one of the following: * * PTI_FOUND - We have found the /etc/path_to_inst file * PTI_REBUILD - We have found the /etc/path_to_inst file and the * first line was PTI_MAGIC_STR. * PTI_NOT_FOUND - We did not find the /etc/path_to_inst file * */ static int in_get_infile(char *filename) { struct _buf *file; int return_val; char buf[PTI_MAGIC_STR_LEN]; /* * Try to open the file. */ if ((file = kobj_open_file(filename)) == (struct _buf *)-1) { return (PTI_NOT_FOUND); } return_val = PTI_FOUND; /* * Read the first PTI_MAGIC_STR_LEN bytes from the file to see if * it contains the magic string. If there aren't that many bytes * in the file, then assume file is correct and no magic string * and move on. */ switch (kobj_read_file(file, buf, PTI_MAGIC_STR_LEN, 0)) { case PTI_MAGIC_STR_LEN: /* * If the first PTI_MAGIC_STR_LEN bytes are the magic string * then return PTI_REBUILD. */ if (strncmp(PTI_MAGIC_STR, buf, PTI_MAGIC_STR_LEN) == 0) return_val = PTI_REBUILD; break; case 0: /* * If the file is zero bytes in length, then consider the * file to not be found */ return_val = PTI_NOT_FOUND; default: /* Do nothing we have a good file */ break; } kobj_close_file(file); return (return_val); }
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace. It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	spa_t *spa;
	char pathname[128];
	struct _buf *file;
	struct bootstat bst;

	/*
	 * Open the configuration file.
	 * The "./" prefix is only applied once rootdir is set; presumably
	 * this makes kobj_open_file resolve relative to rootdir -- TODO
	 * confirm against the kobj implementation.
	 */
	(void) snprintf(pathname, sizeof (pathname), "%s%s/%s",
	    (rootdir != NULL) ? "./" : "", spa_config_dir, ZPOOL_CACHE_FILE);

	file = kobj_open_file(pathname);
	if (file == (struct _buf *)-1)
		return;

	/* Need the file size before allocating a read buffer. */
	if (kobj_fstat(file->_fd, &bst) != 0)
		goto out;

	buf = kmem_alloc(bst.st_size, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, bst.st_size, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, bst.st_size, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		/* Only nvlist-valued pairs describe pools; skip the rest. */
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		/* Skip pools already present in the namespace. */
		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		spa = spa_add(nvpair_name(nvpair), NULL);

		/*
		 * We blindly duplicate the configuration here. If it's
		 * invalid, we will catch it when the pool is first opened.
		 */
		VERIFY(nvlist_dup(child, &spa->spa_config, 0) == 0);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	/*
	 * buf is only non-NULL once kobj_fstat succeeded, so bst.st_size
	 * is valid whenever it is read here.
	 */
	if (buf != NULL)
		kmem_free(buf, bst.st_size);

	kobj_close_file(file);
}
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace. It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	char *pathname;
	struct _buf *file;
	uint64_t fsize;

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);

	file = kobj_open_file(pathname);

	/* The path buffer is only needed for the open; free it at once. */
	kmem_free(pathname, MAXPATHLEN);

	if (file == (struct _buf *)-1)
		return;

	/* Need the file size before allocating a read buffer. */
	if (kobj_get_filesize(file, &fsize) != 0)
		goto out;

	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, fsize, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		/* Only nvlist-valued pairs describe pools; skip the rest. */
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		/* Skip pools already present in the namespace. */
		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		(void) spa_add(nvpair_name(nvpair), child, NULL);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	/* buf is only non-NULL once fsize was obtained, so fsize is valid. */
	if (buf != NULL)
		kmem_free(buf, fsize);

	kobj_close_file(file);
}
/*
 * This function performs the following tasks:
 * - Read the sizes of the new kernel and boot archive.
 * - Allocate memory for the new kernel and boot archive.
 * - Allocate memory for page tables necessary for mapping the memory
 *   allocated for the files.
 * - Read the new kernel and boot archive into memory.
 * - Map in the fast reboot switcher.
 * - Load the fast reboot switcher to FASTBOOT_SWTCH_PA.
 * - Build the new multiboot_info structure
 * - Build page tables for the low 1G of physical memory.
 * - Mark the data structure as valid if all steps have succeeded.
 */
void
fastboot_load_kernel(char *mdep)
{
	void		*buf = NULL;
	int		i;
	fastboot_file_t	*fb;
	uint32_t	dboot_start_offset;
	char		kern_bootpath[OBP_MAXPATHLEN];
	extern uintptr_t postbootkernelbase;
	uintptr_t	saved_kernelbase;
	int		bootpath_len = 0;
	int		is_failsafe = 0;
	int		is_retry = 0;
	uint64_t	end_addr;

	if (!fastreboot_capable)
		return;

	/* Discard any previously staged kernel before loading a new one. */
	if (newkernel.fi_valid)
		fastboot_free_newkernel(&newkernel);

	/*
	 * Temporarily clear postbootkernelbase around the load; restored on
	 * both the success and error paths below.  NOTE(review): the reason
	 * for zeroing it is not visible here -- presumably it affects HAT/
	 * allocation behavior during the load; confirm before relying on it.
	 */
	saved_kernelbase = postbootkernelbase;

	postbootkernelbase = 0;

	/*
	 * Initialize various HAT related fields in the data structure
	 */
	fastboot_init_fields(&newkernel);

	bzero(kern_bootpath, OBP_MAXPATHLEN);

	/*
	 * Process the boot argument
	 */
	bzero(fastboot_args, OBP_MAXPATHLEN);
	fastboot_parse_mdep(mdep, kern_bootpath, &bootpath_len, fastboot_args);

	/*
	 * Make sure we get the null character
	 */
	bcopy(kern_bootpath, fastboot_filename[FASTBOOT_NAME_UNIX],
	    bootpath_len);
	bcopy(kern_bootfile,
	    &fastboot_filename[FASTBOOT_NAME_UNIX][bootpath_len],
	    strlen(kern_bootfile) + 1);

	bcopy(kern_bootpath, fastboot_filename[FASTBOOT_NAME_BOOTARCHIVE],
	    bootpath_len);

	/* Failsafe kernels get a matching failsafe boot archive (below). */
	if (bcmp(kern_bootfile, FAILSAFE_BOOTFILE32,
	    (sizeof (FAILSAFE_BOOTFILE32) - 1)) == 0 ||
	    bcmp(kern_bootfile, FAILSAFE_BOOTFILE64,
	    (sizeof (FAILSAFE_BOOTFILE64) - 1)) == 0) {
		is_failsafe = 1;
	}

load_kernel_retry:
	/*
	 * Read in unix and boot_archive
	 */
	end_addr = DBOOT_ENTRY_ADDRESS;
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		struct _buf	*file;
		uintptr_t	va;
		uint64_t	fsize;
		size_t		fsize_roundup, pt_size;
		int		page_index;
		uintptr_t	offset;
		ddi_dma_attr_t dma_attr = fastboot_dma_attr;


		dprintf("fastboot_filename[%d] = %s\n",
		    i, fastboot_filename[i]);

		if ((file = kobj_open_file(fastboot_filename[i])) ==
		    (struct _buf *)-1) {
			cmn_err(CE_NOTE, "!Fastboot: Couldn't open %s",
			    fastboot_filename[i]);
			goto err_out;
		}

		if (kobj_get_filesize(file, &fsize) != 0) {
			cmn_err(CE_NOTE,
			    "!Fastboot: Couldn't get filesize for %s",
			    fastboot_filename[i]);
			goto err_out;
		}

		fsize_roundup = P2ROUNDUP_TYPED(fsize, PAGESIZE, size_t);

		/*
		 * Where the files end in physical memory after being
		 * relocated by the fast boot switcher.
		 */
		end_addr += fsize_roundup;
		if (end_addr > fastboot_below_1G_dma_attr.dma_attr_addr_hi) {
			cmn_err(CE_NOTE, "!Fastboot: boot archive is too big");
			goto err_out;
		}

		/*
		 * Adjust dma_attr_addr_lo so that the new kernel and boot
		 * archive will not be overridden during relocation.
		 */
		if (end_addr > fastboot_dma_attr.dma_attr_addr_lo ||
		    end_addr > fastboot_below_1G_dma_attr.dma_attr_addr_lo) {

			if (is_retry) {
				/*
				 * If we have already tried and didn't succeed,
				 * just give up.
				 */
				cmn_err(CE_NOTE,
				    "!Fastboot: boot archive is too big");
				goto err_out;
			} else {
				/* Set the flag so we don't keep retrying */
				is_retry++;

				/* Adjust dma_attr_addr_lo */
				fastboot_dma_attr.dma_attr_addr_lo = end_addr;
				fastboot_below_1G_dma_attr.dma_attr_addr_lo =
				    end_addr;

				/*
				 * Free the memory we have already allocated
				 * whose physical addresses might not fit
				 * the new lo and hi constraints.
				 */
				fastboot_free_mem(&newkernel, end_addr);
				goto load_kernel_retry;
			}
		}


		/*
		 * Non-contiguous allocation: size the scatter/gather list
		 * to one entry per page of the file.
		 */
		if (!fastboot_contig)
			dma_attr.dma_attr_sgllen = (fsize / PAGESIZE) +
			    (((fsize % PAGESIZE) == 0) ? 0 : 1);

		if ((buf = contig_alloc(fsize, &dma_attr, PAGESIZE, 0))
		    == NULL) {
			cmn_err(CE_NOTE, fastboot_enomem_msg, fsize, "64G");
			goto err_out;
		}

		va = P2ROUNDUP_TYPED((uintptr_t)buf, PAGESIZE, uintptr_t);

		if (kobj_read_file(file, (char *)va, fsize, 0) < 0) {
			cmn_err(CE_NOTE, "!Fastboot: Couldn't read %s",
			    fastboot_filename[i]);
			goto err_out;
		}

		fb = &newkernel.fi_files[i];
		fb->fb_va = va;
		fb->fb_size = fsize;
		fb->fb_sectcnt = 0;

		pt_size = FASTBOOT_PTE_LIST_SIZE(fsize_roundup);

		/*
		 * If we have reserved memory but it not enough, free it.
		 */
		if (fb->fb_pte_list_size && fb->fb_pte_list_size < pt_size) {
			contig_free((void *)fb->fb_pte_list_va,
			    fb->fb_pte_list_size);
			fb->fb_pte_list_size = 0;
		}

		if (fb->fb_pte_list_size == 0) {
			if ((fb->fb_pte_list_va =
			    (x86pte_t *)contig_alloc(pt_size,
			    &fastboot_below_1G_dma_attr, PAGESIZE, 0))
			    == NULL) {
				cmn_err(CE_NOTE, fastboot_enomem_msg,
				    (uint64_t)pt_size, "1G");
				goto err_out;
			}
			/*
			 * fb_pte_list_size must be set after the allocation
			 * succeeds as it's used to determine how much memory to
			 * free.
			 */
			fb->fb_pte_list_size = pt_size;
		}

		bzero((void *)(fb->fb_pte_list_va), fb->fb_pte_list_size);

		fb->fb_pte_list_pa = mmu_ptob((uint64_t)hat_getpfnum(kas.a_hat,
		    (caddr_t)fb->fb_pte_list_va));

		/*
		 * Build the PTE list: one entry per page of the file,
		 * terminated with FASTBOOT_TERMINATE.
		 */
		for (page_index = 0, offset = 0; offset < fb->fb_size;
		    offset += PAGESIZE) {
			uint64_t paddr;

			paddr = mmu_ptob((uint64_t)hat_getpfnum(kas.a_hat,
			    (caddr_t)fb->fb_va + offset));
			ASSERT(paddr >= fastboot_dma_attr.dma_attr_addr_lo);

			/*
			 * Include the pte_bits so we don't have to make
			 * it in assembly.
			 */
			fb->fb_pte_list_va[page_index++] = (x86pte_t)
			    (paddr | pte_bits);
		}

		fb->fb_pte_list_va[page_index] = FASTBOOT_TERMINATE;

		if (i == FASTBOOT_UNIX) {
			Ehdr	*ehdr = (Ehdr *)va;
			int	j;

			/*
			 * Sanity checks:
			 */
			for (j = 0; j < SELFMAG; j++) {
				if (ehdr->e_ident[j] != ELFMAG[j]) {
					cmn_err(CE_NOTE, "!Fastboot: Bad ELF "
					    "signature");
					goto err_out;
				}
			}

			if (ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
			    ehdr->e_ident[EI_DATA] == ELFDATA2LSB &&
			    ehdr->e_machine == EM_386) {

				fb->fb_sectcnt = sizeof (fb->fb_sections) /
				    sizeof (fb->fb_sections[0]);

				if (fastboot_elf32_find_loadables((void *)va,
				    fsize, &fb->fb_sections[0],
				    &fb->fb_sectcnt, &dboot_start_offset) < 0) {
					cmn_err(CE_NOTE, "!Fastboot: ELF32 "
					    "program section failure");
					goto err_out;
				}

				if (fb->fb_sectcnt == 0) {
					cmn_err(CE_NOTE, "!Fastboot: No ELF32 "
					    "program sections found");
					goto err_out;
				}

				if (is_failsafe) {
					/* Failsafe boot_archive */
					bcopy(BOOTARCHIVE32_FAILSAFE,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE32_FAILSAFE));
				} else {
					bcopy(BOOTARCHIVE32,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE32));
				}

			} else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64 &&
			    ehdr->e_ident[EI_DATA] == ELFDATA2LSB &&
			    ehdr->e_machine == EM_AMD64) {

				if (fastboot_elf64_find_dboot_load_offset(
				    (void *)va, fsize, &dboot_start_offset)
				    != 0) {
					cmn_err(CE_NOTE, "!Fastboot: Couldn't "
					    "find ELF64 dboot entry offset");
					goto err_out;
				}

				if (!is_x86_feature(x86_featureset,
				    X86FSET_64) ||
				    !is_x86_feature(x86_featureset,
				    X86FSET_PAE)) {
					cmn_err(CE_NOTE, "Fastboot: Cannot "
					    "reboot to %s: "
					    "not a 64-bit capable system",
					    kern_bootfile);
					goto err_out;
				}

				if (is_failsafe) {
					/* Failsafe boot_archive */
					bcopy(BOOTARCHIVE64_FAILSAFE,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE64_FAILSAFE));
				} else {
					bcopy(BOOTARCHIVE64,
					    &fastboot_filename
					    [FASTBOOT_NAME_BOOTARCHIVE]
					    [bootpath_len],
					    sizeof (BOOTARCHIVE64));
				}
			} else {
				cmn_err(CE_NOTE, "!Fastboot: Unknown ELF type");
				goto err_out;
			}

			/*
			 * dboot must land exactly at DBOOT_ENTRY_ADDRESS, so
			 * the kernel file's destination is offset backwards.
			 */
			fb->fb_dest_pa = DBOOT_ENTRY_ADDRESS -
			    dboot_start_offset;

			fb->fb_next_pa = DBOOT_ENTRY_ADDRESS + fsize_roundup;
		} else {
			/* Subsequent files are packed after the previous. */
			fb->fb_dest_pa = newkernel.fi_files[i - 1].fb_next_pa;
			fb->fb_next_pa = fb->fb_dest_pa + fsize_roundup;
		}

		kobj_close_file(file);
	}

	/*
	 * Add the function that will switch us to 32-bit protected mode
	 */
	fb = &newkernel.fi_files[FASTBOOT_SWTCH];
	fb->fb_va = fb->fb_dest_pa = FASTBOOT_SWTCH_PA;
	fb->fb_size = MMU_PAGESIZE;

	hat_devload(kas.a_hat, (caddr_t)fb->fb_va,
	    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
	    PROT_READ | PROT_WRITE | PROT_EXEC,
	    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);

	/*
	 * Build the new multiboot_info structure
	 */
	if (fastboot_build_mbi(fastboot_args, &newkernel) != 0) {
		goto err_out;
	}

	/*
	 * Build page table for low 1G physical memory. Use big pages.
	 * Allocate 4 (5 for amd64) pages for the page tables.
	 *	1 page for PML4 (amd64)
	 *	1 page for Page-Directory-Pointer Table
	 *	2 pages for Page Directory
	 *	1 page for Page Table.
	 * The page table entry will be rewritten to map the physical
	 * address as we do the copying.
	 */
	if (newkernel.fi_has_pae) {
#ifdef	__amd64
		size_t size = MMU_PAGESIZE * 5;
#else
		size_t size = MMU_PAGESIZE * 4;
#endif	/* __amd64 */

		/* Free a previously reserved page table area if too small. */
		if (newkernel.fi_pagetable_size &&
		    newkernel.fi_pagetable_size < size) {
			contig_free((void *)newkernel.fi_pagetable_va,
			    newkernel.fi_pagetable_size);
			newkernel.fi_pagetable_size = 0;
		}

		if (newkernel.fi_pagetable_size == 0) {
			if ((newkernel.fi_pagetable_va = (uintptr_t)
			    contig_alloc(size, &fastboot_below_1G_dma_attr,
			    MMU_PAGESIZE, 0)) == NULL) {
				cmn_err(CE_NOTE, fastboot_enomem_msg,
				    (uint64_t)size, "1G");
				goto err_out;
			}
			/*
			 * fi_pagetable_size must be set after the allocation
			 * succeeds as it's used to determine how much memory to
			 * free.
			 */
			newkernel.fi_pagetable_size = size;
		}

		bzero((void *)(newkernel.fi_pagetable_va), size);

		newkernel.fi_pagetable_pa =
		    mmu_ptob((uint64_t)hat_getpfnum(kas.a_hat,
		    (caddr_t)newkernel.fi_pagetable_va));

		newkernel.fi_last_table_pa = newkernel.fi_pagetable_pa +
		    size - MMU_PAGESIZE;

		newkernel.fi_next_table_va = newkernel.fi_pagetable_va +
		    MMU_PAGESIZE;
		newkernel.fi_next_table_pa = newkernel.fi_pagetable_pa +
		    MMU_PAGESIZE;

		fastboot_build_pagetables(&newkernel);
	}


	/* Generate MD5 checksums */
	fastboot_cksum_generate(&newkernel);

	/* Mark it as valid */
	newkernel.fi_valid = 1;
	newkernel.fi_magic = FASTBOOT_MAGIC;

	postbootkernelbase = saved_kernelbase;
	return;

err_out:
	/* Restore state and tear down anything partially staged. */
	postbootkernelbase = saved_kernelbase;
	newkernel.fi_valid = 0;
	fastboot_free_newkernel(&newkernel);
}
/*
 * Read a packed nvlist from the named file and return it unpacked.
 *
 * The file begins with an nvpf_hdr_t carrying a magic number, a version,
 * a header checksum, the payload size, and a payload checksum; all of
 * these are validated, and the file must contain exactly nvpf_size bytes
 * of payload after the header (an extra trailing byte is an error).
 *
 * On success, stores the unpacked nvlist (caller must free) in
 * *ret_nvlist and returns 0.  Otherwise returns ENOENT, EIO, or EINVAL
 * with *ret_nvlist left NULL.
 */
int
fread_nvlist(char *filename, nvlist_t **ret_nvlist)
{
	struct _buf	*file;
	nvpf_hdr_t	hdr;
	char		*buf;
	nvlist_t	*nvl;
	int		rval;
	uint_t		offset;
	int		n;
	char		c;
	uint16_t	cksum, hdrsum;

	*ret_nvlist = NULL;

	file = kobj_open_file(filename);
	if (file == (struct _buf *)-1) {
		KFDEBUG((CE_CONT, "cannot open file: %s\n", filename));
		return (ENOENT);
	}

	offset = 0;
	n = kobj_read_file(file, (char *)&hdr, sizeof (hdr), offset);
	if (n != sizeof (hdr)) {
		kobj_close_file(file);
		if (n < 0) {
			KFIOERR((CE_CONT,
			    "error reading header: %s\n", filename));
			return (EIO);
		} else if (n == 0) {
			/* Empty file: not an I/O error, but still invalid. */
			KFDEBUG((CE_CONT, "file empty: %s\n", filename));
		} else {
			KFIOERR((CE_CONT,
			    "header size incorrect: %s\n", filename));
		}
		return (EINVAL);
	}
	offset += n;

	KFDEBUG2((CE_CONT, "nvpf_magic: 0x%x\n", hdr.nvpf_magic));
	KFDEBUG2((CE_CONT, "nvpf_version: %d\n", hdr.nvpf_version));
	KFDEBUG2((CE_CONT, "nvpf_size: %lld\n",
	    (longlong_t)hdr.nvpf_size));
	KFDEBUG2((CE_CONT, "nvpf_hdr_chksum: 0x%x\n",
	    hdr.nvpf_hdr_chksum));
	KFDEBUG2((CE_CONT, "nvpf_chksum: 0x%x\n", hdr.nvpf_chksum));

	/*
	 * The stored header checksum is computed with the checksum field
	 * itself zeroed, so save it and zero the field before recomputing.
	 */
	cksum = hdr.nvpf_hdr_chksum;
	hdr.nvpf_hdr_chksum = 0;
	hdrsum = nvp_cksum((uchar_t *)&hdr, sizeof (hdr));

	if (hdr.nvpf_magic != NVPF_HDR_MAGIC ||
	    hdr.nvpf_version != NVPF_HDR_VERSION || hdrsum != cksum) {
		kobj_close_file(file);
		if (hdrsum != cksum) {
			KFIOERR((CE_CONT,
			    "%s: checksum error "
			    "(actual 0x%x, expected 0x%x)\n",
			    filename, hdrsum, cksum));
		}
		KFIOERR((CE_CONT,
		    "%s: header information incorrect", filename));
		return (EINVAL);
	}

	ASSERT(hdr.nvpf_size >= 0);

	buf = kmem_alloc(hdr.nvpf_size, KM_SLEEP);
	n = kobj_read_file(file, buf, hdr.nvpf_size, offset);
	if (n != hdr.nvpf_size) {
		kmem_free(buf, hdr.nvpf_size);
		kobj_close_file(file);
		if (n < 0) {
			KFIOERR((CE_CONT,
			    "%s: read error %d", filename, n));
		} else {
			KFIOERR((CE_CONT,
			    "%s: incomplete read %d/%lld",
			    filename, n, (longlong_t)hdr.nvpf_size));
		}
		return (EINVAL);
	}
	offset += n;

	/*
	 * Probe one byte past the expected payload: any data there means
	 * the file is larger than the header claims.
	 */
	rval = kobj_read_file(file, &c, 1, offset);
	kobj_close_file(file);
	if (rval > 0) {
		KFIOERR((CE_CONT, "%s is larger than %lld\n",
		    filename, (longlong_t)hdr.nvpf_size));
		kmem_free(buf, hdr.nvpf_size);
		return (EINVAL);
	}

	cksum = nvp_cksum((uchar_t *)buf, hdr.nvpf_size);
	if (hdr.nvpf_chksum != cksum) {
		KFIOERR((CE_CONT,
		    "%s: checksum error (actual 0x%x, expected 0x%x)\n",
		    filename, hdr.nvpf_chksum, cksum));
		kmem_free(buf, hdr.nvpf_size);
		return (EINVAL);
	}

	nvl = NULL;
	rval = nvlist_unpack(buf, hdr.nvpf_size, &nvl, 0);
	if (rval != 0) {
		KFIOERR((CE_CONT,
		    "%s: error %d unpacking nvlist\n", filename, rval));
		kmem_free(buf, hdr.nvpf_size);
		return (EINVAL);
	}

	kmem_free(buf, hdr.nvpf_size);
	*ret_nvlist = nvl;
	return (0);
}
static int splat_kobj_test2(struct file *file, void *arg) { struct _buf *f; char *buf; uint64_t size; int rc; f = kobj_open_file(SPLAT_KOBJ_TEST_FILE); if (f == (struct _buf *)-1) { splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed to open " "test file: %s\n", SPLAT_KOBJ_TEST_FILE); return -ENOENT; } rc = kobj_get_filesize(f, &size); if (rc) { splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed stat of " "test file: %s (%d)\n", SPLAT_KOBJ_TEST_FILE, rc); goto out; } buf = kmalloc(size + 1, GFP_KERNEL); if (!buf) { rc = -ENOMEM; splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed to alloc " "%lld bytes for tmp buffer (%d)\n", (long long)size, rc); goto out; } memset(buf, 0, size + 1); rc = kobj_read_file(f, buf, size, 0); if (rc < 0) { splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed read of " "test file: %s (%d)\n", SPLAT_KOBJ_TEST_FILE, rc); goto out2; } /* Validate we read as many bytes as expected based on the stat. This * isn't a perfect test since we didn't create the file however it is * pretty unlikely there are garbage characters in your /etc/fstab */ if (size != (uint64_t)strlen(buf)) { rc = -EFBIG; splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Stat'ed size " "(%lld) does not match number of bytes read " "(%lld)\n", (long long)size, (long long)strlen(buf)); goto out2; } rc = 0; splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "\n%s\n", buf); splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Successfully stat'ed " "and read expected number of bytes (%lld) from test " "file: %s\n", (long long)size, SPLAT_KOBJ_TEST_FILE); out2: kfree(buf); out: kobj_close_file(f); return rc; } /* splat_kobj_test2() */