/*
 * Prints the canonical answer ("42.") to the output channel.
 * The argc/argv pair is part of the command-table callback contract
 * and is deliberately unused here.  Always reports success.
 */
NTSTATUS kuhl_m_standard_answer(int argc, wchar_t * argv[])
{
	kprintf(L"42.\n");
	return STATUS_SUCCESS;
}
static UInt32 IODTMapOneInterrupt( IORegistryEntry * regEntry, UInt32 * intSpec, UInt32 index, OSData ** spec, const OSSymbol ** controller ) { IORegistryEntry *parent = 0; OSData *data; UInt32 *addrCmp; UInt32 *maskCmp; UInt32 *map; UInt32 *endMap; UInt32 acells, icells, pacells, picells, cell; UInt32 i, original_icells; bool cmp, ok = false; parent = IODTFindInterruptParent( regEntry, index ); IODTGetICellCounts( parent, &icells, &acells ); addrCmp = 0; if( acells) { data = OSDynamicCast( OSData, regEntry->getProperty( "reg" )); if( data && (data->getLength() >= (acells * sizeof(UInt32)))) addrCmp = (UInt32 *) data->getBytesNoCopy(); } original_icells = icells; regEntry = parent; do { #if IODTSUPPORTDEBUG kprintf ("IODTMapOneInterrupt: current regEntry name %s\n", regEntry->getName()); kprintf ("acells - icells: "); for (i = 0; i < acells; i++) kprintf ("0x%08X ", addrCmp[i]); kprintf ("- "); for (i = 0; i < icells; i++) kprintf ("0x%08X ", intSpec[i]); kprintf ("\n"); #endif if( parent && (data = OSDynamicCast( OSData, regEntry->getProperty( "interrupt-controller")))) { // found a controller - don't want to follow cascaded controllers parent = 0; *spec = OSData::withBytesNoCopy( (void *) intSpec, icells * sizeof(UInt32)); *controller = IODTInterruptControllerName( regEntry ); ok = (*spec && *controller); } else if( parent && (data = OSDynamicCast( OSData, regEntry->getProperty( "interrupt-map")))) { // interrupt-map map = (UInt32 *) data->getBytesNoCopy(); endMap = map + (data->getLength() / sizeof(UInt32)); data = OSDynamicCast( OSData, regEntry->getProperty( "interrupt-map-mask" )); if( data && (data->getLength() >= ((acells + icells) * sizeof(UInt32)))) maskCmp = (UInt32 *) data->getBytesNoCopy(); else maskCmp = 0; #if IODTSUPPORTDEBUG if (maskCmp) { kprintf (" maskCmp: "); for (i = 0; i < acells + icells; i++) { if (i == acells) kprintf ("- "); kprintf ("0x%08X ", maskCmp[i]); } kprintf ("\n"); kprintf (" masked: "); for (i = 0; i < acells + icells; i++) { 
if (i == acells) kprintf ("- "); kprintf ("0x%08X ", ((i < acells) ? addrCmp[i] : intSpec[i-acells]) & maskCmp[i]); } kprintf ("\n"); } else kprintf ("no maskCmp\n"); #endif do { #if IODTSUPPORTDEBUG kprintf (" map: "); for (i = 0; i < acells + icells; i++) { if (i == acells) kprintf ("- "); kprintf ("0x%08X ", map[i]); } kprintf ("\n"); #endif for( i = 0, cmp = true; cmp && (i < (acells + icells)); i++) { cell = (i < acells) ? addrCmp[i] : intSpec[ i - acells ]; if( maskCmp) cell &= maskCmp[i]; cmp = (cell == map[i]); } map += acells + icells; if( 0 == (parent = FindPHandle( *(map++) ))) unexpected(break); IODTGetICellCounts( parent, &picells, &pacells ); if( cmp) { addrCmp = map; intSpec = map + pacells; regEntry = parent; } else { map += pacells + picells; } } while( !cmp && (map < endMap) ); if (!cmp) parent = 0; } if( parent) { IODTGetICellCounts( parent, &icells, &acells ); regEntry = parent; } } while( parent);
/*
 * Sanity-check one DOS/MBR partition entry: verify that the C/H/S start
 * and end fields agree with the LBA (start/size) fields under the assumed
 * disk geometry, while tolerating the standard "pure LBA" encodings.
 *
 * sname      - device/slice name used in diagnostics
 * dp         - the partition entry being checked
 * offset     - LBA base that dp->dp_start is relative to
 * nsectors   - sectors per track of the assumed geometry
 * ntracks    - heads (tracks per cylinder) of the assumed geometry
 * mbr_offset - absolute LBA of the MBR/EBR containing this entry
 *
 * Returns 0 if the entry is consistent, EINVAL otherwise.  Diagnostics
 * are printed when bootverbose is set.
 */
static int
check_part(char *sname, struct dos_partition *dp, u_int64_t offset,
	   int nsectors, int ntracks, u_int64_t mbr_offset)
{
	int chs_ecyl;
	int chs_esect;
	int chs_scyl;
	int chs_ssect;
	int error;
	u_long secpercyl;
	u_int64_t esector;
	u_int64_t esector1;
	u_int64_t ssector;
	u_int64_t ssector1;

	secpercyl = (u_long)nsectors * ntracks;

	/* Absolute start sector reconstructed from the C/H/S fields. */
	chs_scyl = DPCYL(dp->dp_scyl, dp->dp_ssect);
	chs_ssect = DPSECT(dp->dp_ssect);
	ssector = chs_ssect - 1 + dp->dp_shd * nsectors +
	    chs_scyl * secpercyl + mbr_offset;

	/* Absolute start sector taken from the LBA field. */
	ssector1 = offset + dp->dp_start;

	/*
	 * If ssector1 is on a cylinder >= 1024, then ssector can't be right.
	 * Allow the C/H/S for it to be 1023/ntracks-1/nsectors, or correct
	 * apart from the cylinder being reduced modulo 1024.  Always allow
	 * 1023/255/63, because this is the official way to represent
	 * pure-LBA for the starting position.
	 */
	if ((ssector < ssector1 &&
	    ((chs_ssect == nsectors && dp->dp_shd == ntracks - 1 &&
	      chs_scyl == 1023) ||
	     (secpercyl != 0 &&
	      (ssector1 - ssector) % (1024 * secpercyl) == 0))) ||
	    (dp->dp_scyl == 255 && dp->dp_shd == 255 &&
	     dp->dp_ssect == 255)) {
		TRACE(("%s: C/H/S start %d/%d/%d, start %llu: allow\n",
		    sname, chs_scyl, dp->dp_shd, chs_ssect,
		    (long long)ssector1));
		/* Accept the LBA value as authoritative. */
		ssector = ssector1;
	}

	/* Same reconstruction for the end of the partition. */
	chs_ecyl = DPCYL(dp->dp_ecyl, dp->dp_esect);
	chs_esect = DPSECT(dp->dp_esect);
	esector = chs_esect - 1 + dp->dp_ehd * nsectors +
	    chs_ecyl * secpercyl + mbr_offset;
	esector1 = ssector1 + dp->dp_size - 1;

	/*
	 * Allow certain bogus C/H/S values for esector, as above.  However,
	 * heads == 255 isn't really legal and causes some BIOS crashes.  The
	 * correct value to indicate a pure-LBA end is 1023/heads-1/sectors -
	 * usually 1023/254/63.  "heads" is base 0, "sectors" is base 1.
	 */
	if ((esector < esector1 &&
	    ((chs_esect == nsectors && dp->dp_ehd == ntracks - 1 &&
	      chs_ecyl == 1023) ||
	     (secpercyl != 0 &&
	      (esector1 - esector) % (1024 * secpercyl) == 0))) ||
	    (dp->dp_ecyl == 255 && dp->dp_ehd == 255 &&
	     dp->dp_esect == 255)) {
		TRACE(("%s: C/H/S end %d/%d/%d, end %llu: allow\n",
		    sname, chs_ecyl, dp->dp_ehd, chs_esect,
		    (long long)esector1));
		esector = esector1;
	}

	/* Entry is valid only if both reconstructions matched (or were
	 * forgiven above and overwritten with the LBA values). */
	error = (ssector == ssector1 && esector == esector1) ? 0 : EINVAL;

	if (bootverbose)
		kprintf("%s: type 0x%x, start %llu, end = %llu, size %u %s\n",
		    sname, dp->dp_typ, (long long)ssector1,
		    (long long)esector1, dp->dp_size,
		    (error ? "" : ": OK"));
	if (ssector != ssector1 && bootverbose)
		kprintf("%s: C/H/S start %d/%d/%d (%llu) != start %llu: invalid\n",
		    sname, chs_scyl, dp->dp_shd, chs_ssect,
		    (long long)ssector, (long long)ssector1);
	if (esector != esector1 && bootverbose)
		kprintf("%s: C/H/S end %d/%d/%d (%llu) != end %llu: invalid\n",
		    sname, chs_ecyl, dp->dp_ehd, chs_esect,
		    (long long)esector, (long long)esector1);
	return (error);
}
/**
 * Initializes the system and becomes the null process.
 * This is where the system begins after the C environment has been
 * established.  Interrupts are initially DISABLED, and must eventually
 * be enabled explicitly.  This routine turns itself into the null process
 * after initialization.  Because the null process must always remain ready
 * to run, it cannot execute code that might cause it to be suspended, wait
 * for a semaphore, or put to sleep, or exit.  In particular, it must not
 * do I/O unless it uses kprintf for synchronous output.
 */
int nulluser()
{
	/* Banner. */
	kprintf(VERSION);
	kprintf("\r\n\r\n");

	platforminit();

#ifdef DETAIL
	/* Output detected platform. */
	kprintf("Processor identification: 0x%08X\r\n", cpuid);
	kprintf("Detected platform as: %s\r\n\r\n", platform.name);
#endif

	sysinit();

	/* Output XINU memory layout */
	kprintf("%10d bytes physical memory.\r\n",
		(ulong) platform.maxaddr & 0x7FFFFFFF);
#ifdef DETAIL
	kprintf(" [0x%08X to 0x%08X]\r\n",
		(ulong) KSEG0_BASE, (ulong) (platform.maxaddr - 1));
#endif
	kprintf("%10d bytes reserved system area.\r\n",
		(ulong) _start - KSEG0_BASE);
#ifdef DETAIL
	kprintf(" [0x%08X to 0x%08X]\r\n",
		(ulong) KSEG0_BASE, (ulong) _start - 1);
#endif
	kprintf("%10d bytes XINU code.\r\n",
		(ulong) &end - (ulong) _start);
#ifdef DETAIL
	kprintf(" [0x%08X to 0x%08X]\r\n",
		(ulong) _start, (ulong) &end - 1);
#endif
	kprintf("%10d bytes stack space.\r\n",
		(ulong) minheap - (ulong) &end);
#ifdef DETAIL
	kprintf(" [0x%08X to 0x%08X]\r\n",
		(ulong) &end, (ulong) minheap - 1);
#endif
	kprintf("%10d bytes heap space.\r\n",
		(ulong) platform.maxaddr - (ulong) minheap);
#ifdef DETAIL
	kprintf(" [0x%08X to 0x%08X]\r\n\r\n",
		(ulong) minheap, (ulong) platform.maxaddr - 1);
#endif

	/* TODO: This line won't compile properly until you have added
	 * a priority parameter to the create() function.
	 */
	/* Spawn the first user process ("MAIN") and make it ready. */
	ready(create((void *)main, INITSTK, 1, "MAIN", 2, 0, NULL), 0);

	/* enable interrupts here */
	enable();

	/* Become the null process: loop forever, yielding whenever any
	 * other process is ready to run. */
	while (1)
	{
		if (nonempty(readylist))
			resched();

		/* If there are no processes left in the system, completed. */
		if (numproc <= 1)
		{
			kprintf("\r\n\r\nAll user processes have completed.\r\n\r\n");
			while (1)
				;
		}
	}
}
/*
 * Per-module enumeration callback: print the module's name, then walk
 * its import table, forwarding each imported entry (together with the
 * caller-supplied pvArg) to the imported-entry callback.
 *
 * Always returns TRUE so that module enumeration continues.
 */
BOOL CALLBACK kuhl_m_process_imports_callback_module(PKULL_M_PROCESS_VERY_BASIC_MODULE_INFORMATION pModuleInformation, PVOID pvArg)
{
	/* Module header line first, so the entries printed by the
	 * imported-entry callback appear beneath it. */
	kprintf(L"\n%wZ", pModuleInformation->NameDontUseOutsideCallback);

	kull_m_process_getImportedEntryInformations(
		&pModuleInformation->DllBase,
		kuhl_m_process_imports_callback_module_importedEntry,
		pvArg);

	return TRUE;
}
/*
 * Report the outcome of a single FIPS power-on self test (POST).
 *
 * result  - 0 on success, non-zero error code from the POST routine
 * name    - human-readable test name spliced into the log message
 * verbose - when non-zero, log both pass and fail outcomes
 *
 * Returns result unchanged so the caller can decide whether to abort.
 */
static int
FIPSPost_report(int result, const char *name, int verbose)
{
	if (result) {
		if (verbose) {
			kprintf("corecrypto.kext FIPS %s POST test failed\n",
			    name);
		}
	} else if (verbose) {
		kprintf("corecrypto.kext FIPS %s POST test passed!\n", name);
	}
	return result;
}

/*
 * Run the full corecrypto FIPS power-on self-test suite: module
 * integrity, then each algorithm POST in turn.
 *
 * pkmod   - kmod info passed through to the integrity test
 * d       - opaque data passed through to the integrity test
 * verbose - request log output (but see NOTE below)
 *
 * Returns 0 when every test passes (or unconditionally when FORCE_FAIL
 * is defined, in which case failures are logged but not fatal);
 * otherwise the first failing test's non-zero result.
 */
int KEXT_FIPSPost(kmod_info_t* pkmod, void* d, int verbose)
{
	int result = 0;

	/*
	 * NOTE(review): verbose is forced on here, which makes the caller's
	 * verbose argument dead.  Preserved as-is because existing log
	 * output may be relied upon -- confirm whether this debug override
	 * should be removed.
	 */
	verbose = 1;

	/* Integrity test: has the module been tampered with? */
	result = FIPSPost_report(Integrity_POST(pkmod, d, verbose),
	    "integrity", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* AES in CBC mode */
	result = FIPSPost_report(AES_CBC_POST(), "AES CBC", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* Triple DES in CBC mode */
	result = FIPSPost_report(TDES_CBC_POST(), "TDES CBC", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* AES in ECB mode */
	result = FIPSPost_report(AES_ECB_POST(), "AES ECB", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* AES in XTS mode */
	result = FIPSPost_report(AES_XTS_POST(), "AES XTS", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* SHA digests */
	result = FIPSPost_report(SHA_POST(), "SHA", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* HMACs */
	result = FIPSPost_report(HMAC_POST(), "HMAC", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* ECDSA (elliptic curve) */
	result = FIPSPost_report(ECDSA_POST(), "ECDSA", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* DRBG (deterministic random bit generator) */
	result = FIPSPost_report(DRBG_POST(), "DRBG", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/* RSA */
	result = FIPSPost_report(RSA_POST(), "RSA", verbose);
#ifndef FORCE_FAIL
	if (result)
		return result;
#endif

	/*
	 * If the code gets here all the tests have passed and the kext
	 * may set up the crypto KPIs.
	 */
	if (verbose) {
		kprintf("corecrypto.kext FIPS POST passed!\n");
	}

	return 0;
}
/*
 * Initial boot sequence.
 */
static void
boot(void)
{
	/*
	 * The order of these is important!
	 * Don't go changing it without thinking about the consequences.
	 *
	 * Among other things, be aware that console output gets
	 * buffered up at first and does not actually appear until
	 * mainbus_bootstrap() attaches the console device. This can
	 * be remarkably confusing if a bug occurs at this point. So
	 * don't put new code before mainbus_bootstrap if you don't
	 * absolutely have to.
	 *
	 * Also note that the buffer for this is only 1k. If you
	 * overflow it, the system will crash without printing
	 * anything at all. You can make it larger though (it's in
	 * dev/generic/console.c).
	 */

	/* Version banner (buffered until the console attaches). */
	hello();
	kprintf("\n");
	kprintf("OS/161 base system version %s\n", BASE_VERSION);
	kprintf("%s", harvard_copyright);
	kprintf("\n");

	kprintf("fassel system version %s (%s #%d)\n",
		GROUP_VERSION, buildconfig, buildversion);
	kprintf("\n");

	/* Early initialization. */
	ram_bootstrap();
	proc_bootstrap();
	thread_bootstrap();
	hardclock_bootstrap();
	vfs_bootstrap();

	/* Probe and initialize devices. Interrupts should come on. */
	kprintf("Device probe...\n");
	/* Interrupts must be off before the probe and on after it. */
	KASSERT(curthread->t_curspl > 0);
	mainbus_bootstrap();
	KASSERT(curthread->t_curspl == 0);
	/* Now do pseudo-devices. */
	pseudoconfig();
	kprintf("\n");

	/* Late phase of initialization. */
	vm_bootstrap();
	kprintf_bootstrap();
	thread_start_cpus();

	/* Default bootfs - but ignore failure, in case emu0 doesn't exist */
	vfs_setbootfs("emu0");

	/*
	 * Make sure various things aren't screwed up.
	 */
	COMPILE_ASSERT(sizeof(userptr_t) == sizeof(char *));
	COMPILE_ASSERT(sizeof(*(userptr_t)0) == sizeof(char));
}
/*
 * Initialize and write the on-disk volume header for a new HAMMER
 * volume being added to the mount hmp.
 *
 * devvp          - locked device vnode of the new volume
 * vol_name       - name copied into the header
 * vol_no         - volume number assigned to the new volume
 * vol_count      - new total number of volumes in the filesystem
 * vol_size       - size of the device in bytes
 * boot_area_size - bytes reserved for the boot area
 * mem_area_size  - bytes reserved for the memory log area
 *
 * Returns 0 on success; EFTYPE if the device already carries a HAMMER
 * signature or is too small, or the bread/bwrite error code.
 */
static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	struct buf *bp = NULL;
	struct hammer_volume_ondisk *ondisk;
	int error;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
	error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
		goto late_failure;

	ondisk = (struct hammer_volume_ondisk*) bp->b_data;

	/*
	 * Note that we do NOT allow to use a device that contains
	 * a valid HAMMER signature. It has to be cleaned up with dd
	 * before.
	 */
	if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_volume_add: Formatting of valid HAMMER volume "
			"%s denied. Erase with dd!\n", vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/* Build a fresh header in the buffer just read. */
	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk, setup our poor-man's
	 * bigblock allocator.
	 */
	int64_t vol_alloc = HAMMER_BUFSIZE * 16;
	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	/* Round the device size down to a whole buffer boundary. */
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		kprintf("volume %d %s is too small to hold the volume header",
			ondisk->vol_no, ondisk->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
			      HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;

	/*
	 * Write volume header to disk
	 */
	error = bwrite(bp);
	/* bwrite() consumes the buffer; clear bp so we don't brelse it. */
	bp = NULL;

late_failure:
	if (bp)
		brelse(bp);
	return (error);
}
/* * The output routine. Takes a packet and encapsulates it in the protocol * given by sc->g_proto. See also RFC 1701 and RFC 2004 */ static int gre_output_serialized(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct rtentry *rt) { int error = 0; struct gre_softc *sc = ifp->if_softc; struct greip *gh; struct ip *ip; u_short etype = 0; struct mobile_h mob_h; /* * gre may cause infinite recursion calls when misconfigured. * We'll prevent this by introducing upper limit. */ if (++(sc->called) > max_gre_nesting) { kprintf("%s: gre_output: recursively called too many " "times(%d)\n", if_name(&sc->sc_if), sc->called); m_freem(m); error = EIO; /* is there better errno? */ goto end; } if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 0 || sc->g_src.s_addr == INADDR_ANY || sc->g_dst.s_addr == INADDR_ANY) { m_freem(m); error = ENETDOWN; goto end; } gh = NULL; ip = NULL; if (ifp->if_bpf) { bpf_gettoken(); if (ifp->if_bpf) { uint32_t af = dst->sa_family; bpf_ptap(ifp->if_bpf, m, &af, sizeof(af)); } bpf_reltoken(); } m->m_flags &= ~(M_BCAST|M_MCAST); if (sc->g_proto == IPPROTO_MOBILE) { if (dst->sa_family == AF_INET) { struct mbuf *m0; int msiz; ip = mtod(m, struct ip *); /* * RFC2004 specifies that fragmented datagrams shouldn't * be encapsulated. */ if (ip->ip_off & (IP_MF | IP_OFFMASK)) { m_freem(m); error = EINVAL; /* is there better errno? */ goto end; } memset(&mob_h, 0, MOB_H_SIZ_L); mob_h.proto = (ip->ip_p) << 8; mob_h.odst = ip->ip_dst.s_addr; ip->ip_dst.s_addr = sc->g_dst.s_addr; /* * If the packet comes from our host, we only change * the destination address in the IP header. 
* Else we also need to save and change the source */ if (in_hosteq(ip->ip_src, sc->g_src)) { msiz = MOB_H_SIZ_S; } else { mob_h.proto |= MOB_H_SBIT; mob_h.osrc = ip->ip_src.s_addr; ip->ip_src.s_addr = sc->g_src.s_addr; msiz = MOB_H_SIZ_L; } mob_h.proto = htons(mob_h.proto); mob_h.hcrc = gre_in_cksum((u_short *)&mob_h, msiz); if ((m->m_data - msiz) < m->m_pktdat) { /* need new mbuf */ MGETHDR(m0, MB_DONTWAIT, MT_HEADER); if (m0 == NULL) { m_freem(m); error = ENOBUFS; goto end; } m0->m_next = m; m->m_data += sizeof(struct ip); m->m_len -= sizeof(struct ip); m0->m_pkthdr.len = m->m_pkthdr.len + msiz; m0->m_len = msiz + sizeof(struct ip); m0->m_data += max_linkhdr; memcpy(mtod(m0, caddr_t), (caddr_t)ip, sizeof(struct ip)); m = m0; } else { /* we have some space left in the old one */ m->m_data -= msiz; m->m_len += msiz; m->m_pkthdr.len += msiz; bcopy(ip, mtod(m, caddr_t), sizeof(struct ip)); } ip = mtod(m, struct ip *); memcpy((caddr_t)(ip + 1), &mob_h, (unsigned)msiz); ip->ip_len = ntohs(ip->ip_len) + msiz; } else { /* AF_INET */ m_freem(m); error = EINVAL; goto end; } } else if (sc->g_proto == IPPROTO_GRE) {
/*
 * Remove a volume.
 *
 * Looks up the volume named in ioc->device_name, reblocks the filesystem
 * off of it, flushes, frees its freemap, unloads it, renumbers the
 * vol_count in every remaining volume header, updates the root volume's
 * bigblock statistics, and finally erases the removed device's volume
 * header so it cannot be accidentally mounted again.
 *
 * Serialized against other volume operations via hmp->volume_lock.
 * Returns 0 on success or an errno (EINVAL, EAGAIN, EINTR, ...).
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	/* Only one volume add/del may run at a time. */
	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	volume = NULL;

	/*
	 * find volume by volname
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			/* Found it -- keep the reference and stop. */
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		kprintf("Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		kprintf("Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock the entire filesystem so no data remains on the
	 * volume being removed (the reblocker avoids volume_to_remove).
	 */
	hmp->volume_to_remove = volume->vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;

	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			kprintf("reblock was interrupted\n");
		} else {
			kprintf("reblock failed: %d\n", error);
		}
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end;
	}

	/*
	 * Sync filesystem
	 */
	/* Loop until the flusher is idle; after 5 iterations start
	 * printing progress dots and sleeping, give up after 30. */
	int count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: flushing.");
			else
				kprintf(".");
			tsleep(&count, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up");
			break;
		}
	}
	kprintf("\n");

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's bigblock stats
	 */
	struct bigblock_stat stat;
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		kprintf("Failed to free volume. Volume not empty!\n");
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	hmp->volume_to_remove = -1;

	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers
	 */
	/* NOTE(review): volume is still used below after the rel above;
	 * presumably the structure stays valid until unload -- confirm. */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	error = hammer_unload_volume(volume, NULL);
	if (error == -1) {
		kprintf("Failed to unload volume\n");
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	volume = NULL;
	--hmp->nvolumes;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}

		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of bigblocks
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks -= stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks -=
		stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	/*
	 * Erase the volume header of the removed device.
	 *
	 * This is to not accidentally mount the volume again.
	 */
	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error) {
		kprintf("Failed to open device: %s\n", ioc->device_name);
		goto end;
	}
	KKASSERT(devvp);
	error = hammer_clear_volume_header(devvp);
	if (error) {
		kprintf("Failed to clear volume header of device: %s\n",
			ioc->device_name);
		goto end;
	}
	hammer_close_device(&devvp, 0);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}
/*
 * Add a volume to a mounted HAMMER filesystem.
 *
 * Picks the first unused volume number, writes a volume header to the
 * new device, installs the volume, renumbers vol_count in every volume
 * header, formats the new volume's freemap, and credits the root
 * volume's bigblock statistics (and the mount's f_blocks totals).
 *
 * Serialized against other volume operations via hmp->volume_lock.
 * Returns 0 on success or an errno (EINVAL, EAGAIN, ...).
 */
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	/*
	 * NOTE(review): this rejects nvolumes+1 == HAMMER_MAX_VOLUMES,
	 * i.e. the filesystem can never actually reach HAMMER_MAX_VOLUMES
	 * volumes.  Looks like an off-by-one -- confirm intended limit.
	 */
	if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/* Only one volume add/del may run at a time. */
	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	int free_vol_no = 0;
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
	       RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root,
			 free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		hammer_unlock(&hmp->volume_lock);
		return (EINVAL);
	}

	/* Write the header for the new volume to its device. */
	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error)
		goto end;
	KKASSERT(devvp);
	error = hammer_format_volume_header(
		hmp,
		devvp,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	hammer_close_device(&devvp, 0);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	++hmp->nvolumes;

	/*
	 * Set each volumes new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/* Format the new volume's freemap; stat collects bigblock counts. */
	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	struct bigblock_stat stat;
	error = hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);

	/*
	 * Increase the total number of bigblocks and update stat/vstat
	 * totals.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);
	mp->mnt_stat.f_blocks += trans->rootvol->ondisk->vol0_stat_bigblocks *
				 (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	mp->mnt_vstat.f_blocks += trans->rootvol->ondisk->vol0_stat_bigblocks *
				  (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);

	/*
	 * Increase the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks +=
		stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_rel_volume(volume, 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		kprintf("An error occurred: %d\n", error);
	return (error);
}
/*
 * Attach routine for the Rio USB device: validate the attach arguments,
 * enumerate the interface's endpoints to find the IN and OUT endpoint
 * addresses, and create the character device node.
 *
 * Returns 0 on success, ENXIO with a console diagnostic on failure.
 */
static int
urio_attach(device_t self)
{
	struct urio_softc *sc = device_get_softc(self);
	struct usb_attach_arg *uaa = device_get_ivars(self);
	usbd_interface_handle iface;
	u_int8_t epcount;
	usbd_status r;
	char * ermsg = "<none>";	/* which step failed, for the error msg */
	int i;

	DPRINTFN(10,("urio_attach: sc=%p\n", sc));
	sc->sc_dev = self;
	sc->sc_udev = uaa->device;

	if ((!uaa->device) || (!uaa->iface)) {
		ermsg = "device or iface";
		goto nobulk;
	}
	sc->sc_iface = iface = uaa->iface;
	sc->sc_opened = 0;
	sc->sc_pipeh_in = 0;
	sc->sc_pipeh_out = 0;
	sc->sc_refcnt = 0;

	r = usbd_endpoint_count(iface, &epcount);
	if (r != USBD_NORMAL_COMPLETION) {
		ermsg = "endpoints";
		goto nobulk;
	}

	/* Sentinel values: 0xff/0x00 mean "endpoint not found yet". */
	sc->sc_epaddr[RIO_OUT] = 0xff;
	sc->sc_epaddr[RIO_IN] = 0x00;

	/* Record the endpoint address for each directional endpoint. */
	for (i = 0; i < epcount; i++) {
		usb_endpoint_descriptor_t *edesc =
			usbd_interface2endpoint_descriptor(iface, i);
		int d;

		if (!edesc) {
			ermsg = "interface endpoint";
			goto nobulk;
		}
		d = RIO_UE_GET_DIR(edesc->bEndpointAddress);
		if (d != RIO_NODIR)
			sc->sc_epaddr[d] = edesc->bEndpointAddress;
	}
	/* Both directions must have been found (sentinels overwritten). */
	if ( sc->sc_epaddr[RIO_OUT] == 0xff ||
	     sc->sc_epaddr[RIO_IN] == 0x00) {
		ermsg = "Rio I&O";
		goto nobulk;
	}

	make_dev(&urio_ops, device_get_unit(self),
		 UID_ROOT, GID_OPERATOR, 0644,
		 "urio%d", device_get_unit(self));

	DPRINTFN(10, ("urio_attach: %p\n", sc->sc_udev));

	return 0;

nobulk:
	kprintf("%s: could not find %s\n",
		device_get_nameunit(sc->sc_dev), ermsg);
	return ENXIO;
}
/*
 * Walk every known partition table and mount each automountable,
 * named partition.  Tracks the strongest reboot requirement reported
 * by checkMount() (2 = optional, 3 = required) and, if any, asks the
 * user via a requester whether to ColdReboot().
 *
 * ptlist - list of PartitionTableNode entries to process
 */
void mountPartitions(struct List *ptlist)
{
	struct EasyStruct es =
	{
		sizeof(struct EasyStruct), 0,
		"HDToolBox", 0,
		"Yes|No"
	};
	struct PartitionTableNode *table;
	struct PartitionHandle *ph;
	WORD cm;		/* checkMount() result for one partition */
	WORD reboot = 0;	/* strongest reboot requirement seen so far */

	D(bug("[HDToolBox] mountPartitions()\n"));

	/* Outer loop: every partition table in the list. */
	table = (struct PartitionTableNode *)ptlist->lh_Head;
	while (table->ln.ln_Succ)
	{
		if (table->type != PHPTT_UNKNOWN)
		{
			/* Inner loop: every partition in this table. */
			ph = (struct PartitionHandle *)table->ph->table->list.lh_Head;
			while (ph->ln.ln_Succ)
			{
				if (existsAttr(table->pattrlist, PTA_AUTOMOUNT))
				{
					LONG flag;

					GetPartitionAttrsA(ph, PT_AUTOMOUNT, &flag, TAG_DONE);
					if (flag)
					{
						/* Only named partitions can be mounted. */
						if (existsAttr(table->pattrlist, PTA_NAME))
						{
							UBYTE name[32];
							struct DosEnvec de;

							GetPartitionAttrsA(ph, PT_NAME, name,
								PT_DOSENVEC, &de, TAG_DONE);
							cm = checkMount(table, name, &de);
							if (cm == 1)
								mount(table, ph, name, &de);
							else if (cm == 2)
								kprintf("may reboot\n");
							else if (cm == 3)
								kprintf("have to reboot\n");
							else
								kprintf("mount %s not needed\n", name);
							/* Remember the strongest requirement. */
							if (reboot<cm)
								reboot = cm;
						}
						else
							kprintf("Partition with no name is automountable\n");
					}
				}
				ph = (struct PartitionHandle *)ph->ln.ln_Succ;
			}
		}
		table = (struct PartitionTableNode *)table->ln.ln_Succ;
	}

	/* Prompt for a reboot when any partition required/suggested one. */
	if (reboot > 1)
	{
		if (reboot == 2)
		{
			es.es_TextFormat =
				"A reboot is not necessary because the changes do not\n"
				"affect the work of any running filesystem.\n"
				"Do you want to reboot anyway?";
		}
		else
		{
			es.es_TextFormat =
				"A reboot is required because the changes affect\n"
				"the work of at least one running filesystem.\n"
				"Do you want to reboot now?";
		}
		if (EasyRequestArgs(0, &es, 0, 0))
			ColdReboot();
	}
}
/*
 * Configure the logfile: when a "stop" argument is present, logging is
 * disabled (NULL filename); otherwise the first positional argument,
 * or the default log name, is used.  The result of switching the output
 * file is reported as OK/KO.  Always returns success.
 */
NTSTATUS kuhl_m_standard_log(int argc, wchar_t * argv[])
{
	PCWCHAR filename, status;

	if (kull_m_string_args_byName(argc, argv, L"stop", NULL, NULL))
		filename = NULL;
	else if (argc)
		filename = argv[0];
	else
		filename = MIMIKATZ_DEFAULT_LOG;

	status = kull_m_output_file(filename) ? L"OK" : L"KO";
	kprintf(L"Using \'%s\' for logfile : %s\n", filename, status);

	return STATUS_SUCCESS;
}
/*
 * Look up a symbol by name in a loaded ELF file: first through the
 * SysV hash table (buckets/chains) over the dynamic symbol table,
 * then, if distinct, by exhaustive scan of the debug symbol table.
 *
 * lf   - linker file whose private data is the elf_file_t
 * name - symbol name to resolve
 * sym  - out: the matching Elf_Sym, cast to c_linker_sym_t
 *
 * Returns 0 on success, ENOENT if the symbol is absent, undefined
 * with no usable value, or the hash table is missing/corrupt.
 */
int
link_elf_lookup_symbol(linker_file_t lf, const char* name,
		       c_linker_sym_t* sym)
{
	elf_file_t ef = lf->priv;
	unsigned long symnum;
	const Elf_Sym* symp;
	const char *strp;
	unsigned long hash;
	int i;

	/* If we don't have a hash, bail. */
	if (ef->buckets == NULL || ef->nbuckets == 0) {
		kprintf("link_elf_lookup_symbol: missing symbol hash table\n");
		return ENOENT;
	}

	/* First, search hashed global symbols */
	hash = elf_hash(name);
	symnum = ef->buckets[hash % ef->nbuckets];

	/* Walk the hash chain until the STN_UNDEF terminator. */
	while (symnum != STN_UNDEF) {
		if (symnum >= ef->nchains) {
			kprintf("link_elf_lookup_symbol: corrupt symbol table\n");
			return ENOENT;
		}

		symp = ef->symtab + symnum;
		if (symp->st_name == 0) {
			kprintf("link_elf_lookup_symbol: corrupt symbol table\n");
			return ENOENT;
		}

		strp = ef->strtab + symp->st_name;
		if (strcmp(name, strp) == 0) {
			/* Accept defined symbols, or undefined functions
			 * with a non-zero value (e.g. PLT entries). */
			if (symp->st_shndx != SHN_UNDEF ||
			    (symp->st_value != 0 &&
			     ELF_ST_TYPE(symp->st_info) == STT_FUNC)) {
				*sym = (c_linker_sym_t) symp;
				return 0;
			} else {
				return ENOENT;
			}
		}

		symnum = ef->chains[symnum];
	}

	/* If we have not found it, look at the full table (if loaded) */
	if (ef->symtab == ef->ddbsymtab)
		return ENOENT;

	/* Exhaustive search */
	for (i = 0, symp = ef->ddbsymtab; i < ef->ddbsymcnt; i++, symp++) {
		strp = ef->ddbstrtab + symp->st_name;
		if (strcmp(name, strp) == 0) {
			if (symp->st_shndx != SHN_UNDEF ||
			    (symp->st_value != 0 &&
			     ELF_ST_TYPE(symp->st_info) == STT_FUNC)) {
				*sym = (c_linker_sym_t) symp;
				return 0;
			} else {
				return ENOENT;
			}
		}
	}

	return ENOENT;
}
//End Borrowed void print_error(){ kprintf("I/O Error"); panic(); }
/*------------------------------------------------------------------------
 * resched -- reschedule processor to highest priority ready process
 *
 * Notes: Upon entry, currpid gives current process id.
 * Proctab[currpid].pstate gives correct NEXT state for
 * current process if other than PRREADY.
 *------------------------------------------------------------------------
 */
int resched()
{
	STATWORD PS;
	register struct pentry *optr;	/* pointer to old process entry */
	register struct pentry *nptr;	/* pointer to new process entry */
	register int i;

	/* Interrupts stay disabled across the whole switch. */
	disable(PS);
	/* no switch needed if current process priority higher than next*/
	if ( ( (optr= &proctab[currpid])->pstate == PRCURR) && (lastkey(rdytail)<optr->pprio)) {
		restore(PS);
		return(OK);
	}
#ifdef STKCHK
	/* make sure current stack has room for ctsw */
	asm("movl %esp, currSP");
	if (currSP - optr->plimit < 48) {
		kprintf("Bad SP current process, pid=%d (%s), lim=0x%lx, currently 0x%lx\n", currpid, optr->pname, (unsigned long) optr->plimit, (unsigned long) currSP);
		panic("current process stack overflow");
	}
#endif
	/* force context switch: the outgoing process goes back on the
	 * ready list unless its state was already changed by the caller. */
	if (optr->pstate == PRCURR) {
		optr->pstate = PRREADY;
		insert(currpid,rdyhead,optr->pprio);
	}
	/* remove highest priority process at end of ready list */
	nptr = &proctab[ (currpid = getlast(rdytail)) ];
	nptr->pstate = PRCURR;		/* mark it currently running	*/
#ifdef notdef
#ifdef STKCHK
	if ( *( (int *)nptr->pbase ) != MAGIC ) {
		kprintf("Bad magic pid=%d value=0x%lx, at 0x%lx\n", currpid, (unsigned long) *( (int *)nptr->pbase ), (unsigned long) nptr->pbase);
		panic("stack corrupted");
	}
	/*
	 * need ~16 longs of stack space below, so include that in check
	 * below.
	 */
	if (nptr->pesp - nptr->plimit < 48) {
		kprintf("Bad SP pid=%d (%s), lim=0x%lx will be 0x%lx\n", currpid, nptr->pname, (unsigned long) nptr->plimit, (unsigned long) nptr->pesp);
		panic("stack overflow");
	}
#endif /* STKCHK */
#endif /* notdef */
#ifdef RTCLOCK
	preempt = QUANTUM;		/* reset preemption counter	*/
#endif
#ifdef DEBUG
	PrintSaved(nptr);
#endif
	/* Install the incoming process' page directory before switching. */
	// write_cr3(nptr->pg_dir.base_frm->base_addr);
	write_cr3(nptr->pg_dir.base_frm->base_addr );

	ctxsw(&optr->pesp, optr->pirmask, &nptr->pesp, nptr->pirmask);

#ifdef DEBUG
	PrintSaved(nptr);
#endif
	/* The OLD process returns here when resumed. */
	restore(PS);
	return OK;
}
/*
 * ckpt_thaw_proc - restore a checkpointed process image from the ELF core
 * file 'fp' into the lwp/proc given by 'lp'.
 *
 * Restores, in order: program headers, register state (notes), text vnodes,
 * signal dispositions, open files, and finally the writable mappings (last,
 * in case 'fp' is a socket and must be read strictly sequentially).
 * Returns 0 on success or the first error encountered.
 */
static int ckpt_thaw_proc(struct lwp *lp, struct file *fp)
{
	struct proc *p = lp->lwp_proc;
	Elf_Phdr *phdr = NULL;
	Elf_Ehdr *ehdr = NULL;
	int error;
	size_t nbyte;

	TRACE_ENTER;

	ehdr = kmalloc(sizeof(Elf_Ehdr), M_TEMP, M_ZERO | M_WAITOK);

	if ((error = elf_gethdr(fp, ehdr)) != 0)
		goto done;

	/* e_phnum is a 16-bit count, so this multiply cannot overflow size_t. */
	nbyte = sizeof(Elf_Phdr) * ehdr->e_phnum;
	phdr = kmalloc(nbyte, M_TEMP, M_WAITOK);

	/* fetch description of program writable mappings */
	if ((error = elf_getphdrs(fp, phdr, nbyte)) != 0)
		goto done;

	/* fetch notes section containing register state */
	if ((error = elf_getnotes(lp, fp, phdr->p_filesz)) != 0)
		goto done;

	/* fetch program text vnodes */
	if ((error = elf_gettextvp(p, fp)) != 0)
		goto done;

	/* fetch signal disposition */
	if ((error = elf_getsigs(lp, fp)) != 0) {
		kprintf("failure in recovering signals\n");
		goto done;
	}

	/* fetch open files */
	if ((error = elf_getfiles(lp, fp)) != 0)
		goto done;

	/* handle mappings last in case we are reading from a socket */
	error = elf_loadphdrs(fp, phdr, ehdr->e_phnum);

	/*
	 * Set the textvp to the checkpoint file and mark the vnode so
	 * a future checkpointing of this checkpoint-restored program
	 * will copy out the contents of the mappings rather then trying
	 * to record the vnode info related to the checkpoint file, which
	 * is likely going to be destroyed when the program is re-checkpointed.
	 */
	if (error == 0 && fp->f_data && fp->f_type == DTYPE_VNODE) {
		if (p->p_textvp)
			vrele(p->p_textvp);
		p->p_textvp = (struct vnode *)fp->f_data;
		vsetflags(p->p_textvp, VCKPT);
		vref(p->p_textvp);
	}
done:
	if (ehdr)
		kfree(ehdr, M_TEMP);
	if (phdr)
		kfree(phdr, M_TEMP);
	/* NOTE(review): magic signal number 35 — presumably SIGCKPT on this
	 * platform; confirm and prefer the symbolic constant. */
	lwpsignal(p, lp, 35);
	TRACE_EXIT;
	return error;
}
/*
 * Integrity_POST - verify the integrity of a kext's __TEXT,__text section.
 *
 * Computes an HMAC-SHA256 (single zero key byte) over the __text section of
 * the Mach-O image described by 'pkmod' and compares its hex representation
 * against the expected hash delivered in 'd' (an AppleTEXTHash_t).
 *
 * Parameters:
 *   pkmod   - kmod info; pkmod->address is the Mach-O header of the kext.
 *   d       - AppleTEXTHash_t* carrying version, length and expected hash.
 *   verbose - nonzero to kprintf diagnostic messages.
 *
 * Returns 0 when the computed hash matches the expected one, -1 on any
 * validation failure or mismatch.
 *
 * BUG FIX vs. original: the segment-size dispatch and the 32-bit cast used
 * pLoadCommand (always the FIRST load command) instead of pSniffPtr (the
 * command the scan actually stopped on).  That only worked by accident when
 * __TEXT happened to be the first load command; both now use pSniffPtr.
 */
static int Integrity_POST(kmod_info_t* pkmod, void* d, int verbose)
{
    int result = -1; // Set to zero for sucesses until it all works
    size_t sha256DigestBufferLength = 32;

    if (NULL == d) {
        if (verbose) {
            kprintf("The AppleTEXTHash_t pointer was NOT passed to the Integrity_POST function\n");
        }
        return result;
    }

    AppleTEXTHash_t* pHashData = (AppleTEXTHash_t*)d;

    /* Only version 1 records with a full SHA-256 digest are accepted. */
    if (pHashData->ath_version != 1 || pHashData->ath_length != (int)sha256DigestBufferLength) {
        if (verbose) {
            kprintf("The AppleTEXTHash_t pointer passed to Integrity_POST function, is invalid\n");
        }
        return result;
    }

    if (NULL == pHashData->ath_hash) {
        if (verbose) {
            kprintf("The AppleTEXTHash_t pointer passed to Integrity_POST function,has a null HASH pointer\n");
        }
        return result;
    }

    /* Hex string of the expected hash: two chars per byte plus NUL. */
    unsigned long plist_hash_output_buffer_size = (sha256DigestBufferLength * 2) + 1;
    unsigned char plist_hash_output_buffer[plist_hash_output_buffer_size];
    char* pPlistHexBuf = (char*)bytesToHexString(pHashData->ath_hash, pHashData->ath_length, plist_hash_output_buffer, plist_hash_output_buffer_size);
    if (verbose) {
        kprintf("Plist hmac value is %s\n", pPlistHexBuf);
    }

    // Now calcuate the HMAC
    struct mach_header* pmach_header = (struct mach_header*)pkmod->address;
    struct load_command* pLoadCommand = NULL;
    uint32_t num_load_commands = 0;

    /* Locate the first load command; header size differs for 64/32-bit. */
    if (pmach_header->magic == MH_MAGIC_64) {
        struct mach_header_64* pmach64_header = (struct mach_header_64*)pmach_header;
        num_load_commands = pmach64_header->ncmds;
        pLoadCommand = (struct load_command*)(((unsigned char*)pmach_header) + sizeof(struct mach_header_64));
    } else if (pmach_header->magic == MH_MAGIC) {
        num_load_commands = pmach_header->ncmds;
        pLoadCommand = (struct load_command*)(((unsigned char*)pmach_header) + sizeof(struct mach_header));
    }

    if (NULL == pLoadCommand) {
        if (verbose) {
            kprintf("pLoadCommand is NULL!\n");
        }
        return result;
    }

    /* HMAC with a single zero key byte. */
    const struct ccdigest_info* di = ccsha256_di();
    unsigned char hmac_key = 0;
    cchmac_ctx_decl(di->state_size, di->block_size, ctx);
    cchmac_init(di, ctx, 1, &hmac_key);

    int hashCreated = 0;
    unsigned long iCnt;
    unsigned long jCnt;
    struct segment_command* pSniffPtr = (struct segment_command*)pLoadCommand;

    // Loop through the Segments to find the __TEXT, __text segment
    for (iCnt = 0; iCnt < num_load_commands; iCnt++) {
        // The struct segment_command and the struct segment_command_64 have the same
        // first three fields so sniff the name by casting to a struct segment_command
        // NOTE(review): for non-segment load commands segname reads arbitrary
        // bytes of the command; harmless for the comparison but worth knowing.
        if (strncmp("__TEXT", pSniffPtr->segname, strlen("__TEXT"))) {
            // Not the segment we want: advance to the next load command.
            if (LC_SEGMENT_64 == pSniffPtr->cmd) {
                struct segment_command_64* pSegmentPtr = (struct segment_command_64*)pSniffPtr;
                pSniffPtr = (struct segment_command*)(((unsigned char *)pSegmentPtr) + pSegmentPtr->cmdsize);
            } else if (LC_SEGMENT == pSniffPtr->cmd) {
                pSniffPtr = (struct segment_command*)(((unsigned char *)pSniffPtr) + pSniffPtr->cmdsize);
            }
            continue;
        }

        // Found the __TEXT segment.  Dispatch on the FOUND command's cmd
        // (pSniffPtr), not on the first load command as the original did.
        if (LC_SEGMENT_64 == pSniffPtr->cmd) {
            // 64-bit load segment command
            struct segment_command_64* pSegmentPtr = (struct segment_command_64*)pSniffPtr;
            unsigned int numSections = (unsigned int)pSegmentPtr->nsects;
            struct section_64* pSectionPtr = (struct section_64*)(((unsigned char*)pSegmentPtr) + sizeof(struct segment_command_64));
            int texttextsectionprocessed = 0;

            // Need to find the __text __TEXT section
            for (jCnt = 0; jCnt < numSections; jCnt++) {
                if ( !strcmp(pSectionPtr->sectname, "__text") && !strcmp(pSectionPtr->segname, "__TEXT")) {
                    // Found it: feed the section bytes into the HMAC.
                    unsigned char* pSectionData = (unsigned char*)(((unsigned char*)pmach_header) + pSectionPtr->offset);
                    cchmac_update(di, ctx, (unsigned long)pSectionPtr->size, pSectionData);
                    hashCreated = 1;
                    texttextsectionprocessed = 1;
                    break;
                } else {
                    // Move to the next section record
                    pSectionPtr++;
                }
            }
            if (texttextsectionprocessed) {
                break;
            }
        } else if (LC_SEGMENT == pSniffPtr->cmd) {
            // 32-bit load segment command (cast fixed to pSniffPtr)
            struct segment_command* pSegmentPtr = (struct segment_command*)pSniffPtr;
            unsigned int numSections = (unsigned int)pSegmentPtr->nsects;
            struct section* pSectionPtr = (struct section*)(((unsigned char*)pSegmentPtr) + sizeof(struct segment_command));
            int texttextsectionprocessed = 0;

            // Need to find the __text __TEXT section
            for (jCnt = 0; jCnt < numSections; jCnt++) {
                if ( !strcmp(pSectionPtr->sectname, "__text") && !strcmp(pSectionPtr->segname, "__TEXT")) {
                    // Found it: feed the section bytes into the HMAC.
                    unsigned char* pSectionData = (unsigned char*)(((unsigned char*)pmach_header) + pSectionPtr->offset);
                    cchmac_update(di, ctx, (unsigned long)pSectionPtr->size, pSectionData);
                    hashCreated = 1;
                    texttextsectionprocessed = 1;
                    break;
                } else {
                    // Move to the next section record
                    pSectionPtr++;
                }
            }
            if (texttextsectionprocessed) {
                break;
            }
        }
    }

    unsigned long hash_output_buffer_size = (sha256DigestBufferLength * 2) + 1;
    unsigned char hash_output_buffer[hash_output_buffer_size];
    unsigned char hmac_buffer[sha256DigestBufferLength];
    memset(hmac_buffer, 0, sha256DigestBufferLength);

    // Check to see if the hash was created
    if (hashCreated) {
        // finalize the HMAC
        cchmac_final(di, ctx, hmac_buffer);
        char* pHexBuf = (char*)bytesToHexString(hmac_buffer, sha256DigestBufferLength, hash_output_buffer, hash_output_buffer_size);
        if (verbose) {
            kprintf("Computed hmac value is %s\n", pHexBuf);
        }
    } else {
        if (verbose) {
            kprintf("Integrity_POST: WARNING! could not create the hash!\n");
        }
        return -1;
    }

#ifdef FORCE_FAIL
    // futz with the generated hmac
    hash_output_buffer[0] = 0;
    // This will always work because it is the charter representation of the
    // hash that is being checked.
#endif

    /* Compare the hex-string representations (including the NUL). */
    result = memcmp(hash_output_buffer, plist_hash_output_buffer, hash_output_buffer_size);
    return result;
}
/*
 * kuhl_m_crypto_l_stores_enumCallback_print - system-store enumeration
 * callback: prints a running index and the store's name, then bumps the
 * caller-supplied DWORD counter passed through pvArg.
 * Always returns TRUE so enumeration continues.
 */
BOOL WINAPI kuhl_m_crypto_l_stores_enumCallback_print(const void *pvSystemStore, DWORD dwFlags, PCERT_SYSTEM_STORE_INFO pStoreInfo, void *pvReserved, void *pvArg)
{
	DWORD *pIndex = (DWORD *) pvArg;

	kprintf(L"%2u. %s\n", (*pIndex)++, (wchar_t *) pvSystemStore);
	return TRUE;
}
/*
 * mmap_init - read the BIOS memory map left at MMAP_ADDRESS, keep only the
 * usable entries, and clip/remove any region that intersects the kernel's
 * reserved range [KERNEL_ALL_LO_ADDR, KERNEL_ALL_HI_ADDR).
 * Results are stored in the globals mmap_array / mmap_length.
 * Panics when no usable region survives.
 */
void mmap_init()
{
	kprintf("Entering MMAP\n");
	const MMapEntry* mmap = (MMapEntry*)MMAP_ADDRESS;
	const uint32_t mmap_count = *(uint32_t*)MMAP_COUNT_ADDRESS;
	/* Dump the raw BIOS entries for debugging. */
	for (uint32_t i = 0; i < mmap_count; ++i) {
		kprintf("(%d) Base: 0x%x Length: %d type: %d ACPI: %d\n", i, mmap[i].base, mmap[i].length, mmap[i].type, mmap[i].ACPI);
	}

	// This is checked by the assert above
	const uint64_t reserved_lo = KERNEL_ALL_LO_ADDR;
	const uint64_t reserved_hi = KERNEL_ALL_HI_ADDR;

	// Need to fix the MMAP entries
	mmap_length = 0;
	/* Copy only TYPE_USABLE entries, capped at MAX_ENTRIES. */
	for (uint32_t i = 0; i < mmap_count && mmap_length < MAX_ENTRIES; ++i) {
		if (mmap[i].type != TYPE_USABLE)
			continue;
		// We have some (possibly invalid) assumptions about the memory map
		// given by the BIOS. The first is that every region is unique, no
		// duplicate or overlapping regions. Second is that the type fields
		// are accurate.
		mmap_array[mmap_length].base = mmap[i].base;
		mmap_array[mmap_length].length = mmap[i].length;
		++mmap_length;
	}
	for (int32_t i = 0; i < mmap_length; ++i) {
		kprintf("0x%x - %d\n", mmap_array[i].base, mmap_array[i].length);
	}
	if (mmap_length == 0) {
		panic("Failed to find suitable usable memory region.");
	}

	kprintf("Res Lo: 0x%x - Res Hi: 0x%x\n", reserved_lo, reserved_hi);
	kprintf("MMAP Entries: %d\n", mmap_length);

	// Fix all the found regions
	for (int32_t i = 0; i < mmap_length; ++i) {
		// Fix the region in case it intersects the kernel's memory
		uint64_t base = mmap_array[i].base;
		uint64_t end = base + mmap_array[i].length;
		kprintf("Region: 0x%x - 0x%x\n", base, end);
		/* Case 1: region straddles the whole reserved range — keep only
		 * the part above it (the part below the kernel is dropped). */
		if (base < reserved_lo && end > reserved_hi) {
			// TODO split region, but maybe not because it intersects kernel?
			base = reserved_hi;
		/* Case 2: region starts inside the reserved range and extends
		 * past it — clip its start up to the reserved top. */
		} else if (base < reserved_hi && end > reserved_hi) {
			base = reserved_hi;
		/* Case 3: region lies entirely inside the reserved range, or
		 * entirely/partially below it — discard the entry. */
		} else if ((base >= reserved_lo && end <= reserved_hi) ||
			// Don't allow regions below the kernel
			(base < reserved_lo && end <= reserved_hi)) {
			kprintf("case 3\n");
			// Remove this entry by moving down all other entries
			for (int32_t j = i; j < mmap_length-1; ++j) {
				mmap_array[j] = mmap_array[j+1];
			}
			--mmap_length;
			--i; // We want start at this new moved entry
			continue; // Skip the assignment below
		}

		// Perform some more sanity checks
		if (base <= reserved_lo || end <= reserved_lo) {
			panic("Base region is too low");
		}
		if (end <= base) {
			panic("Region does not exist");
		}

		// Region does not intersect kernel
		mmap_array[i].base = base;
		mmap_array[i].length = end - base;
		kprintf("Fixed region: 0x%x - %d - %dMiB\n", mmap_array[i].base, mmap_array[i].length, mmap_array[i].length / 1024 / 1024);
	}

	if (mmap_length == 0) {
		panic("No suitable regions after fixing");
	}
}
/*
 * kuhl_m_crypto_l_certificates - "certificates" command: enumerate the
 * certificates of a system store (default CURRENT_USER / "My"), print each
 * certificate's name and private-key container/provider/type information,
 * and optionally export them ("/export" switch).
 *
 * NOTE(review): this function appears TRUNCATED in this file — the code
 * below ends after the per-name loop; the enclosing enumeration loop, the
 * store close and the return statement are missing.  Restore the tail from
 * the upstream source before building.
 */
NTSTATUS kuhl_m_crypto_l_certificates(int argc, wchar_t * argv[]) { HCERTSTORE hCertificateStore; PCCERT_CONTEXT pCertContext; DWORD i, j, dwSizeNeeded, keySpec; wchar_t *certName; PCRYPT_KEY_PROV_INFO pBuffer; HCRYPTPROV_OR_NCRYPT_KEY_HANDLE monProv; HCRYPTKEY maCle; BOOL keyToFree; PCWCHAR szSystemStore, szStore; DWORD dwSystemStore = 0; BOOL export = kull_m_string_args_byName(argc, argv, L"export", NULL, NULL); kull_m_string_args_byName(argc, argv, L"systemstore", &szSystemStore, L"CURRENT_USER"/*kuhl_m_crypto_system_stores[0].name*/); dwSystemStore = kull_m_crypto_system_store_to_dword(szSystemStore); kull_m_string_args_byName(argc, argv, L"store", &szStore, L"My"); kprintf(L" * System Store : \'%s\' (0x%08x)\n" L" * Store : \'%s\'\n\n", szSystemStore, dwSystemStore, szStore); if(hCertificateStore = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, (HCRYPTPROV_LEGACY) NULL, dwSystemStore | CERT_STORE_OPEN_EXISTING_FLAG | CERT_STORE_READONLY_FLAG, szStore)) { for (i = 0, pCertContext = CertEnumCertificatesInStore(hCertificateStore, NULL); pCertContext != NULL; pCertContext = CertEnumCertificatesInStore(hCertificateStore, pCertContext), i++) { for(j = 0; j < ARRAYSIZE(nameSrc); j++) { dwSizeNeeded = CertGetNameString(pCertContext, nameSrc[j], 0, NULL, NULL, 0); if(dwSizeNeeded > 0) { if(certName = (wchar_t *) LocalAlloc(LPTR, dwSizeNeeded * sizeof(wchar_t))) { if(CertGetNameString(pCertContext, nameSrc[j], 0, NULL, certName, dwSizeNeeded) == dwSizeNeeded) { kprintf(L"%2u. %s\n", i, certName); dwSizeNeeded = 0; if(CertGetCertificateContextProperty(pCertContext, CERT_KEY_PROV_INFO_PROP_ID, NULL, &dwSizeNeeded)) { if(pBuffer = (PCRYPT_KEY_PROV_INFO) LocalAlloc(LPTR, dwSizeNeeded)) { if(CertGetCertificateContextProperty(pCertContext, CERT_KEY_PROV_INFO_PROP_ID, pBuffer, &dwSizeNeeded)) { kprintf( L"\tKey Container : %s\n" L"\tProvider : %s\n", (pBuffer->pwszContainerName ? pBuffer->pwszContainerName : L"(null)"), (pBuffer->pwszProvName ?
pBuffer->pwszProvName : L"(null)")); if(CryptAcquireCertificatePrivateKey(pCertContext, CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG /* CRYPT_ACQUIRE_SILENT_FLAG NULL */, NULL, &monProv, &keySpec, &keyToFree)) { kprintf(L"\tType : %s (0x%08x)\n", kull_m_crypto_keytype_to_str(keySpec), keySpec); if(keySpec != CERT_NCRYPT_KEY_SPEC) { if(CryptGetUserKey(monProv, keySpec, &maCle)) { kuhl_m_crypto_printKeyInfos(0, maCle); CryptDestroyKey(maCle); } else PRINT_ERROR_AUTO(L"CryptGetUserKey"); if(keyToFree) CryptReleaseContext(monProv, 0); } else if(kuhl_m_crypto_hNCrypt) { kuhl_m_crypto_printKeyInfos(monProv, 0); if(keyToFree) K_NCryptFreeObject(monProv); } else PRINT_ERROR(L"keySpec == CERT_NCRYPT_KEY_SPEC without CNG Handle ?\n"); } else PRINT_ERROR_AUTO(L"CryptAcquireCertificatePrivateKey"); } else PRINT_ERROR_AUTO(L"CertGetCertificateContextProperty"); } LocalFree(pBuffer); if(!export) kprintf(L"\n"); } if(export) kuhl_m_crypto_exportCert(pCertContext, (BOOL) dwSizeNeeded, szSystemStore, szStore, i, certName); } else PRINT_ERROR_AUTO(L"CertGetNameString"); LocalFree(certName); } break; } else PRINT_ERROR_AUTO(L"CertGetNameString (for len)"); }
/*
 * kuhl_m_process_list_callback_process - per-process enumeration callback:
 * prints one line "PID<TAB>image name" for the given process entry.
 * Always returns TRUE so enumeration continues.
 */
BOOL CALLBACK kuhl_m_process_list_callback_process(PSYSTEM_PROCESS_INFORMATION pSystemProcessInformation, PVOID pvArg)
{
	PSYSTEM_PROCESS_INFORMATION pInfo = pSystemProcessInformation;

	kprintf(L"%u\t%wZ\n", pInfo->UniqueProcessId, &pInfo->ImageName);
	return TRUE;
}
/*
 * link_elf_error - print a kldload diagnostic message 's' to the console.
 */
static void link_elf_error(const char *s)
{
	kprintf("kldload: %s\n", s);
}
/*
 * malloc - first-fit allocator over the fm_top_level_memory regions.
 *
 * Each allocation reserves num_bytes plus a struct fm_mem_reserved header;
 * the free-list bookkeeping lives inside the managed memory itself.  The
 * first gap large enough (before the first block, between two blocks, or
 * after the last block of an active region) is handed out.
 *
 * Returns a pointer to the start of the reserved range, or E_OUT_OF_MEMORY
 * when no region has a large-enough gap.
 * NOTE(review): E_OUT_OF_MEMORY is returned as a void* sentinel instead of
 * NULL — callers must compare against it explicitly; confirm its value
 * cannot collide with a valid address.
 */
void *malloc(unsigned int num_bytes)
{
	int i = 0;
	struct fm_mem_reserved *ptr;
	unsigned int space_required;
	unsigned int free_left = 0;
	struct fm_mem_reserved *item;

	// The link list describing memory actually comes from the
	// unassigned memory blocks itself!!!
	space_required = num_bytes + sizeof(struct fm_mem_reserved);
	for (i = 0; i < MEM_ARRAY_SIZE; i++) {
		if (fm_top_level_memory[i].active == TRUE) {
			item = fm_top_level_memory[i].head;
			/*
			 * WHOLE BLOCK IS FREE. ALLOCATE FIRST CHILD AND ASSIGN TO HEAD
			 */
			if (item == NULL) {
				free_left = fm_top_level_memory[i].memory_end - fm_top_level_memory[i].memory_start;
				if (space_required < free_left) {
					ptr = make_block( fm_top_level_memory[i].memory_start, fm_top_level_memory[i].memory_start + space_required );
					fm_top_level_memory[i].head = ptr;
					return (void*) ptr->memory_start;
				}
			} else {
				/* NOTE(review): debug trace on every allocation in a
				 * populated region — consider removing for production. */
				kprintf("scanning for end of list\n");
				/*
				 * ROOM BEFORE FIRST CHILD OF HEAD
				 * (first block allocated out from this top level was removed)
				 */
				if ((item->memory_start - fm_top_level_memory[i].memory_start) > space_required) {
					ptr = make_block( fm_top_level_memory[i].memory_start, fm_top_level_memory[i].memory_start + space_required );
					ptr->next = item;
					fm_top_level_memory[i].head = ptr;
					return (void*) ptr->memory_start;
				}
				/*
				 * ROOM BETWEEN TWO NODES
				 * (allocated block from the middle of the list was removed)
				 */
				while (item->next != NULL) {
					if ((item->next->memory_start - item->memory_end) > space_required) {
						//gap inside the linked list. hand it out.
						ptr = make_block( item->memory_end, item->memory_end + space_required );
						ptr->next = item->next;
						item->next = ptr;
						return (void*) ptr->memory_start;
					}
					item = item->next;
				}
				/*
				 * THE BLOCK ISN'T FULL AND SPACE IS AVAILABLE AT END OF LIST
				 */
				free_left = fm_top_level_memory[i].memory_end - item->memory_end;
				if (space_required < free_left) {
					ptr = make_block( item->memory_end, item->memory_end + space_required );
					item->next = ptr;
					return (void*) ptr->memory_start;
				}
			}
		}
	}
	kprintf("no block big enough\n");
	return E_OUT_OF_MEMORY;
}
/*
 * link_elf_load_file - load a shared-object kernel module from the
 * filesystem into kernel memory and link it.
 *
 * Steps: locate the file via linker_search_path()/nlookup, read and
 * validate the ELF header (class, data encoding, version, type, machine),
 * scan the program headers (exactly two PT_LOAD segments, text then data,
 * plus a required PT_DYNAMIC), map the object's address range, read the
 * two load segments and zero the bss, relocate, resolve dependencies, and
 * finally load the symbol/string tables if present.
 *
 * Returns 0 and stores the new linker_file_t through 'result', or an
 * errno value on failure (cleanup of the partial file happens at 'out').
 *
 * NOTE(review): after the "Unreadable program headers" diagnostic the code
 * falls through and keeps using the headers rather than failing — confirm
 * this matches upstream intent; it looks like a missing error/goto.
 */
static int link_elf_load_file(const char* filename, linker_file_t* result) { struct nlookupdata nd; struct thread *td = curthread; /* XXX */ struct proc *p = td->td_proc; struct vnode *vp; Elf_Ehdr *hdr; caddr_t firstpage; int nbytes, i; Elf_Phdr *phdr; Elf_Phdr *phlimit; Elf_Phdr *segs[2]; int nsegs; Elf_Phdr *phdyn; Elf_Phdr *phphdr; caddr_t mapbase; size_t mapsize; Elf_Off base_offset; Elf_Addr base_vaddr; Elf_Addr base_vlimit; int error = 0; int resid; elf_file_t ef; linker_file_t lf; char *pathname; Elf_Shdr *shdr; int symtabindex; int symstrindex; int symcnt; int strcnt; /* XXX Hack for firmware loading where p == NULL */ if (p == NULL) { p = &proc0; } KKASSERT(p != NULL); if (p->p_ucred == NULL) { kprintf("link_elf_load_file: cannot load '%s' from filesystem" " this early\n", filename); return ENOENT; } shdr = NULL; lf = NULL; pathname = linker_search_path(filename); if (pathname == NULL) return ENOENT; error = nlookup_init(&nd, pathname, UIO_SYSSPACE, NLC_FOLLOW|NLC_LOCKVP); if (error == 0) error = vn_open(&nd, NULL, FREAD, 0); kfree(pathname, M_LINKER); if (error) { nlookup_done(&nd); return error; } vp = nd.nl_open_vp; nd.nl_open_vp = NULL; nlookup_done(&nd); /* * Read the elf header from the file.
*/ firstpage = kmalloc(PAGE_SIZE, M_LINKER, M_WAITOK); hdr = (Elf_Ehdr *)firstpage; error = vn_rdwr(UIO_READ, vp, firstpage, PAGE_SIZE, 0, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid); nbytes = PAGE_SIZE - resid; if (error) goto out; if (!IS_ELF(*hdr)) { error = ENOEXEC; goto out; } if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA) { link_elf_error("Unsupported file layout"); error = ENOEXEC; goto out; } if (hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_version != EV_CURRENT) { link_elf_error("Unsupported file version"); error = ENOEXEC; goto out; } if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) { error = ENOSYS; goto out; } if (hdr->e_machine != ELF_TARG_MACH) { link_elf_error("Unsupported machine"); error = ENOEXEC; goto out; } /* * We rely on the program header being in the first page. This is * not strictly required by the ABI specification, but it seems to * always true in practice. And, it simplifies things considerably. */ if (!((hdr->e_phentsize == sizeof(Elf_Phdr)) && (hdr->e_phoff + hdr->e_phnum*sizeof(Elf_Phdr) <= PAGE_SIZE) && (hdr->e_phoff + hdr->e_phnum*sizeof(Elf_Phdr) <= nbytes))) link_elf_error("Unreadable program headers"); /* * Scan the program header entries, and save key information. * * We rely on there being exactly two load segments, text and data, * in that order.
*/ phdr = (Elf_Phdr *) (firstpage + hdr->e_phoff); phlimit = phdr + hdr->e_phnum; nsegs = 0; phdyn = NULL; phphdr = NULL; while (phdr < phlimit) { switch (phdr->p_type) { case PT_LOAD: if (nsegs == 2) { link_elf_error("Too many sections"); error = ENOEXEC; goto out; } segs[nsegs] = phdr; ++nsegs; break; case PT_PHDR: phphdr = phdr; break; case PT_DYNAMIC: phdyn = phdr; break; case PT_INTERP: error = ENOSYS; goto out; } ++phdr; } if (phdyn == NULL) { link_elf_error("Object is not dynamically-linked"); error = ENOEXEC; goto out; } /* * Allocate the entire address space of the object, to stake out our * contiguous region, and to establish the base address for relocation. */ base_offset = trunc_page(segs[0]->p_offset); base_vaddr = trunc_page(segs[0]->p_vaddr); base_vlimit = round_page(segs[1]->p_vaddr + segs[1]->p_memsz); mapsize = base_vlimit - base_vaddr; ef = kmalloc(sizeof(struct elf_file), M_LINKER, M_WAITOK | M_ZERO); #ifdef SPARSE_MAPPING ef->object = vm_object_allocate(OBJT_DEFAULT, mapsize >> PAGE_SHIFT); if (ef->object == NULL) { kfree(ef, M_LINKER); error = ENOMEM; goto out; } vm_object_hold(ef->object); vm_object_reference_locked(ef->object); ef->address = (caddr_t)vm_map_min(&kernel_map); error = vm_map_find(&kernel_map, ef->object, 0, (vm_offset_t *)&ef->address, mapsize, PAGE_SIZE, 1, VM_MAPTYPE_NORMAL, VM_PROT_ALL, VM_PROT_ALL, 0); vm_object_drop(ef->object); if (error) { vm_object_deallocate(ef->object); kfree(ef, M_LINKER); goto out; } #else ef->address = kmalloc(mapsize, M_LINKER, M_WAITOK); #endif mapbase = ef->address; /* * Read the text and data sections and zero the bss.
*/ for (i = 0; i < 2; i++) { caddr_t segbase = mapbase + segs[i]->p_vaddr - base_vaddr; error = vn_rdwr(UIO_READ, vp, segbase, segs[i]->p_filesz, segs[i]->p_offset, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid); if (error) { #ifdef SPARSE_MAPPING vm_map_remove(&kernel_map, (vm_offset_t) ef->address, (vm_offset_t) ef->address + (ef->object->size << PAGE_SHIFT)); vm_object_deallocate(ef->object); #else kfree(ef->address, M_LINKER); #endif kfree(ef, M_LINKER); goto out; } bzero(segbase + segs[i]->p_filesz, segs[i]->p_memsz - segs[i]->p_filesz); #ifdef SPARSE_MAPPING /* * Wire down the pages */ vm_map_wire(&kernel_map, (vm_offset_t) segbase, (vm_offset_t) segbase + segs[i]->p_memsz, 0); #endif } ef->dynamic = (const Elf_Dyn *) (mapbase + phdyn->p_vaddr - base_vaddr); lf = linker_make_file(filename, ef, &link_elf_file_ops); if (lf == NULL) { #ifdef SPARSE_MAPPING vm_map_remove(&kernel_map, (vm_offset_t) ef->address, (vm_offset_t) ef->address + (ef->object->size << PAGE_SHIFT)); vm_object_deallocate(ef->object); #else kfree(ef->address, M_LINKER); #endif kfree(ef, M_LINKER); error = ENOMEM; goto out; } lf->address = ef->address; lf->size = mapsize; error = parse_dynamic(lf); if (error) goto out; link_elf_reloc_local(lf); error = linker_load_dependencies(lf); if (error) goto out; error = relocate_file(lf); if (error) goto out; /* Try and load the symbol table if it's present. (you can strip it!)
*/ nbytes = hdr->e_shnum * hdr->e_shentsize; if (nbytes == 0 || hdr->e_shoff == 0) goto nosyms; shdr = kmalloc(nbytes, M_LINKER, M_WAITOK | M_ZERO); error = vn_rdwr(UIO_READ, vp, (caddr_t)shdr, nbytes, hdr->e_shoff, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid); if (error) goto out; symtabindex = -1; symstrindex = -1; for (i = 0; i < hdr->e_shnum; i++) { if (shdr[i].sh_type == SHT_SYMTAB) { symtabindex = i; symstrindex = shdr[i].sh_link; } } if (symtabindex < 0 || symstrindex < 0) goto nosyms; symcnt = shdr[symtabindex].sh_size; ef->symbase = kmalloc(symcnt, M_LINKER, M_WAITOK); strcnt = shdr[symstrindex].sh_size; ef->strbase = kmalloc(strcnt, M_LINKER, M_WAITOK); error = vn_rdwr(UIO_READ, vp, ef->symbase, symcnt, shdr[symtabindex].sh_offset, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid); if (error) goto out; error = vn_rdwr(UIO_READ, vp, ef->strbase, strcnt, shdr[symstrindex].sh_offset, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid); if (error) goto out; ef->ddbsymcnt = symcnt / sizeof(Elf_Sym); ef->ddbsymtab = (const Elf_Sym *)ef->symbase; ef->ddbstrcnt = strcnt; ef->ddbstrtab = ef->strbase; nosyms: *result = lf; out: if (error && lf) linker_file_unload(lf); if (shdr) kfree(shdr, M_LINKER); if (firstpage) kfree(firstpage, M_LINKER); vn_unlock(vp); vn_close(vp, FREAD); return error; }
/*
 * udp_init - initialize the global UDP protocol control block (PCB) state:
 * the PCB list head, the connection and port hash tables, the inpcb zone
 * allocator, and (on Apple builds) the lock group/attributes and rw-lock
 * protecting the PCB info, plus NAT hooks.
 */
void udp_init()
{
	vm_size_t str_size;
	struct inpcbinfo *pcbinfo;

	LIST_INIT(&udb);
	udbinfo.listhead = &udb;
	udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
	udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.porthashmask);
#ifdef __APPLE__
	str_size = (vm_size_t) sizeof(struct inpcb);
	udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "udpcb");
	pcbinfo = &udbinfo;
	/*
	 * allocate lock group attribute and group for udp pcb mutexes
	 */
	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr);
	pcbinfo->mtx_attr = lck_attr_alloc_init();
	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
		return;	/* pretty much dead if this fails... */
	in_pcb_nat_init(&udbinfo, AF_INET, IPPROTO_UDP, SOCK_DGRAM);
#else
	udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets, ZONE_INTERRUPT, 0);
#endif
#if 0
	/* for pcb sharing testing only */
	/* NOTE(review): dead debug code; the first in_pcb_grab_port() call
	 * below never assigns 'stat' before printing it. */
	stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
	kprintf("udp_init in_pcb_new_share_client - stat = %d\n", stat);
	laddr.s_addr = 0x11646464;
	faddr.s_addr = 0x11646465;
	lport = 1500;
	in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
	kprintf("udp_init in_pcb_grab_port - stat = %d\n", stat);
	stat = in_pcb_rem_share_client(&udbinfo, fake_owner);
	kprintf("udp_init in_pcb_rem_share_client - stat = %d\n", stat);
	stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
	kprintf("udp_init in_pcb_new_share_client(2) - stat = %d\n", stat);
	laddr.s_addr = 0x11646464;
	faddr.s_addr = 0x11646465;
	lport = 1500;
	stat = in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
	kprintf("udp_init in_pcb_grab_port(2) - stat = %d\n", stat);
#endif
}
static int relocate_file(linker_file_t lf) { elf_file_t ef = lf->priv; const Elf_Rel *rellim; const Elf_Rel *rel; const Elf_Rela *relalim; const Elf_Rela *rela; const char *symname; /* Perform relocations without addend if there are any: */ rel = ef->rel; if (rel) { rellim = (const Elf_Rel *)((const char *)ef->rel + ef->relsize); while (rel < rellim) { if (elf_reloc(lf, (Elf_Addr)ef->address, rel, ELF_RELOC_REL, elf_lookup)) { symname = symbol_name(ef, rel->r_info); kprintf("link_elf: symbol %s undefined\n", symname); return ENOENT; } rel++; } } /* Perform relocations with addend if there are any: */ rela = ef->rela; if (rela) { relalim = (const Elf_Rela *)((const char *)ef->rela + ef->relasize); while (rela < relalim) { if (elf_reloc(lf, (Elf_Addr)ef->address, rela, ELF_RELOC_RELA, elf_lookup)) { symname = symbol_name(ef, rela->r_info); kprintf("link_elf: symbol %s undefined\n", symname); return ENOENT; } rela++; } } /* Perform PLT relocations without addend if there are any: */ rel = ef->pltrel; if (rel) { rellim = (const Elf_Rel *)((const char *)ef->pltrel + ef->pltrelsize); while (rel < rellim) { if (elf_reloc(lf, (Elf_Addr)ef->address, rel, ELF_RELOC_REL, elf_lookup)) { symname = symbol_name(ef, rel->r_info); kprintf("link_elf: symbol %s undefined\n", symname); return ENOENT; } rel++; } } /* Perform relocations with addend if there are any: */ rela = ef->pltrela; if (rela) { relalim = (const Elf_Rela *)((const char *)ef->pltrela + ef->pltrelasize); while (rela < relalim) { symname = symbol_name(ef, rela->r_info); if (elf_reloc(lf, (Elf_Addr)ef->address, rela, ELF_RELOC_RELA, elf_lookup)) { kprintf("link_elf: symbol %s undefined\n", symname); return ENOENT; } rela++; } } return 0; }
/*
 * mbr_extended - parse an extended boot record (EBR) at 'ext_offset' and
 * register the slices it describes, recursing into any nested extended
 * partitions.
 *
 * 'base_ext_offset' is the offset of the outermost extended partition (EBR
 * link entries are relative to it); 'level' is the recursion depth, capped
 * at 16 to stop corrupt self-referencing EBR chains.
 *
 * BUG FIX vs. original: the recursive call passed '++level', mutating the
 * local depth counter on every loop iteration — so the Nth extended entry
 * at the SAME depth was counted as N levels deeper and could falsely trip
 * the recursion guard.  Siblings now all recurse with 'level + 1'.
 */
static void
mbr_extended(cdev_t dev, struct disk_info *info, struct diskslices *ssp,
	     u_int64_t ext_offset, u_int64_t ext_size, u_int64_t base_ext_offset,
	     int nsectors, int ntracks, u_int64_t mbr_offset, int level)
{
	struct buf *bp;
	u_char *cp;
	int dospart;
	struct dos_partition *dp;
	struct dos_partition dpcopy[NDOSPART];
	u_int64_t ext_offsets[NDOSPART];
	u_int64_t ext_sizes[NDOSPART];
	char partname[2];
	int slice;
	char *sname;
	struct diskslice *sp;

	if (level >= 16) {
		kprintf( "%s: excessive recursion in search for slices; aborting search\n", devtoname(dev));
		return;
	}

	/* Read extended boot record. */
	bp = getpbuf_mem(NULL);
	KKASSERT((int)info->d_media_blksize <= bp->b_bufsize);
	bp->b_bio1.bio_offset = (off_t)ext_offset * info->d_media_blksize;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	bp->b_bcount = info->d_media_blksize;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_flags |= B_FAILONDIS;
	dev_dstrategy(dev, &bp->b_bio1);
	if (biowait(&bp->b_bio1, "mbrrd") != 0) {
		diskerr(&bp->b_bio1, dev, "reading extended partition table: error", LOG_PRINTF, 0);
		kprintf("\n");
		goto done;
	}

	/* Weakly verify it: the 0x55AA boot signature must be present. */
	cp = bp->b_data;
	if (cp[0x1FE] != 0x55 || cp[0x1FF] != 0xAA) {
		sname = dsname(dev, dkunit(dev), WHOLE_DISK_SLICE, WHOLE_SLICE_PART, partname);
		if (bootverbose)
			kprintf("%s: invalid extended partition table: no magic\n", sname);
		goto done;
	}

	/* Make a copy of the partition table to avoid alignment problems. */
	memcpy(&dpcopy[0], cp + DOSPARTOFF, sizeof(dpcopy));

	slice = ssp->dss_nslices;
	for (dospart = 0, dp = &dpcopy[0]; dospart < NDOSPART; dospart++, dp++) {
		ext_sizes[dospart] = 0;
		/* All-zero entry: unused slot. */
		if (dp->dp_scyl == 0 && dp->dp_shd == 0 && dp->dp_ssect == 0 && dp->dp_start == 0 && dp->dp_size == 0)
			continue;
		if (dp->dp_typ == DOSPTYP_EXTENDED || dp->dp_typ == DOSPTYP_EXTENDEDX) {
			/* Link entry to the next EBR: remember it for the
			 * recursion pass below.  Offsets are relative to the
			 * outermost extended partition. */
			/* NOTE(review): static buffer — not reentrant; kept as
			 * in the original (slice scanning is single-threaded). */
			static char buf[32];

			sname = dsname(dev, dkunit(dev), WHOLE_DISK_SLICE, WHOLE_SLICE_PART, partname);
			ksnprintf(buf, sizeof(buf), "%s", sname);
			if (strlen(buf) < sizeof buf - 11)
				strcat(buf, "<extended>");
			check_part(buf, dp, base_ext_offset, nsectors, ntracks, mbr_offset);
			ext_offsets[dospart] = base_ext_offset + dp->dp_start;
			ext_sizes[dospart] = dp->dp_size;
		} else {
			/* Regular logical partition: register it as a slice. */
			sname = dsname(dev, dkunit(dev), slice, WHOLE_SLICE_PART, partname);
			check_part(sname, dp, ext_offset, nsectors, ntracks, mbr_offset);
			if (slice >= MAX_SLICES) {
				kprintf("%s: too many slices\n", sname);
				slice++;
				continue;
			}
			sp = &ssp->dss_slices[slice];
			if (mbr_setslice(sname, info, sp, dp, ext_offset) != 0)
				continue;
			slice++;
		}
	}
	ssp->dss_nslices = slice;

	/* If we found any more slices, recursively find all the subslices. */
	for (dospart = 0; dospart < NDOSPART; dospart++) {
		if (ext_sizes[dospart] != 0) {
			/* level + 1 (not ++level): do not mutate our own depth
			 * across sibling entries. */
			mbr_extended(dev, info, ssp, ext_offsets[dospart], ext_sizes[dospart], base_ext_offset, nsectors, ntracks, mbr_offset, level + 1);
		}
	}
done:
	bp->b_flags |= B_INVAL | B_AGE;
	relpbuf(bp, NULL);
}
/* NTSTATUS kuhl_m_standard_test(int argc, wchar_t * argv[]) { return STATUS_SUCCESS; } */

/*
 * kuhl_m_standard_exit - "exit" command: print a goodbye message and
 * return STATUS_FATAL_APP_EXIT so the command loop terminates.
 */
NTSTATUS kuhl_m_standard_exit(int argc, wchar_t * argv[])
{
	kprintf(L"Bye!\n");
	return STATUS_FATAL_APP_EXIT;
}