Example #1
/*
 * Allocate memory for variable-sized tables.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE,
	    0, false, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
Example #2
void test_format_bytes()
{
  size_t size = 14;
  char actual[size];

  format_bytes(actual, size, 0);
  assert_string_equal("0B", actual);
  format_bytes(actual, size, 1);
  assert_string_equal("1.00B", actual);
  format_bytes(actual, size, 12);
  assert_string_equal("12.00B", actual);
  format_bytes(actual, size, 1000);
  assert_string_equal("1.00KB", actual);
  format_bytes(actual, size, 1234);
  assert_string_equal("1.23KB", actual);
  format_bytes(actual, size, pow(1000, 2));
  assert_string_equal("1.00MB", actual);
  format_bytes(actual, size, pow(1000, 3));
  assert_string_equal("1.00GB", actual);
  format_bytes(actual, size, pow(1000, 4));
  assert_string_equal("1.00TB", actual);
  format_bytes(actual, size, pow(1000, 4) * 123.45);
  assert_string_equal("123.45TB", actual);
  format_bytes(actual, size, UINT64_MAX);
  assert_string_equal("18446744.07TB", actual);
}
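The assertions above pin down what this project expects from its format_bytes(): base-1000 units, two decimal places, and a bare "0B" for zero. A minimal sketch of an implementation that would satisfy exactly these tests (hypothetical; format_bytes_sketch is an invented name, not the project's actual code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch: decimal (base-1000) units, two decimal places,
 * and a bare "0B" for zero, as required by test_format_bytes() above. */
static void format_bytes_sketch(char *buf, size_t len, uint64_t bytes)
{
  static const char *units[] = { "B", "KB", "MB", "GB", "TB" };
  double value = (double)bytes;
  size_t i = 0;

  if (bytes == 0) {
    snprintf(buf, len, "0B");
    return;
  }
  /* Scale down by 1000 until the value fits the current unit, capping at TB. */
  while (value >= 1000.0 && i < 4) {
    value /= 1000.0;
    i++;
  }
  snprintf(buf, len, "%.2f%s", value, units[i]);
}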
Example #3
/**
 * Scan media to determine the chip's properties.
 * This function resets the device.
 */
static int
nor_scan_media(device_t self, struct nor_chip * const chip)
{
	struct nor_softc * const sc = device_private(self);
	char pbuf[3][sizeof("XXXX MB")];

	KASSERT(sc->sc_nor_if != NULL);
	KASSERT(sc->sc_nor_if->scan_media != NULL);
	int error = sc->sc_nor_if->scan_media(self, chip);
	if (error != 0)
		return error;

#ifdef NOR_VERBOSE
	aprint_normal_dev(self,
	    "manufacturer id: 0x%.4x (%s), device id: 0x%.4x\n",
	    chip->nc_manf_id,
	    nor_midtoname(chip->nc_manf_id),
	    chip->nc_dev_id);
#endif

	format_bytes(pbuf[0], sizeof(pbuf[0]), chip->nc_page_size);
	format_bytes(pbuf[1], sizeof(pbuf[1]), chip->nc_spare_size);
	format_bytes(pbuf[2], sizeof(pbuf[2]), chip->nc_block_size);
	aprint_normal_dev(self,
	    "page size: %s, spare size: %s, block size: %s\n",
	    pbuf[0], pbuf[1], pbuf[2]);

	format_bytes(pbuf[0], sizeof(pbuf[0]), chip->nc_size);
	aprint_normal_dev(self,
	    "LUN size: %" PRIu32 " blocks, LUNs: %" PRIu8
	    ", total storage size: %s\n",
	    chip->nc_lun_blocks, chip->nc_num_luns, pbuf[0]);

#ifdef NOTYET
	/* XXX does this apply to nor? */
	/*
	 * calculate badblock marker offset in oob
	 * we try to be compatible with linux here
	 */
	if (chip->nc_page_size > 512)
		chip->nc_badmarker_offs = 0;
	else
		chip->nc_badmarker_offs = 5;
#endif

	/* Calculate page shift and mask */
	chip->nc_page_shift = ffs(chip->nc_page_size) - 1;
	chip->nc_page_mask = ~(chip->nc_page_size - 1);
	/* same for block */
	chip->nc_block_shift = ffs(chip->nc_block_size) - 1;
	chip->nc_block_mask = ~(chip->nc_block_size - 1);

#ifdef NOTYET
	/* look for quirks here if needed in future */
	nor_quirks(self, chip);
#endif

	return 0;
}
Example #4
int main(void)
{
    uint64_t last_in = 0;
    uint64_t last_out = 0;

    struct ifaddrs *ifa_list = NULL, *ifa;

    if(getifaddrs(&ifa_list) == -1)
    {
        perror("getifaddrs");
        return 1;
    }

    for(ifa = ifa_list; ifa; ifa = ifa->ifa_next)
    {
        if(AF_LINK != ifa->ifa_addr->sa_family)
            continue;
        if(!(ifa->ifa_flags & IFF_UP) && !(ifa->ifa_flags & IFF_RUNNING))
            continue;
        if(ifa->ifa_data == 0)
            continue;
        
        if(strcmp(IFACE_CODE, ifa->ifa_name) == 0)
        {
            struct if_data *if_data = (struct if_data *)ifa->ifa_data;
            
            uint64_t in_packets = if_data->ifi_ipackets;
            uint64_t out_packets = if_data->ifi_opackets;
            uint64_t in_bytes = if_data->ifi_ibytes;
            uint64_t out_bytes = if_data->ifi_obytes;
            
            uint64_t net_in = 0;
            if(last_in > 0)
                net_in = in_bytes - last_in;
            
            uint64_t net_out = 0;
            if(last_out > 0)
                net_out = out_bytes - last_out;
            
            last_in = in_bytes;
            last_out = out_bytes;
            
            char buffer1[50], buffer2[50];
            //printf("Packets: in = %llu, out = %llu\n", in_packets, out_packets);
            printf("Bytes: in = %s, out = %s\n", format_bytes(buffer1, in_bytes), format_bytes(buffer2, out_bytes));
            //printf("Bandwidth: in = %s/s, out = %s/s\n", format_bytes(buffer1, net_in), format_bytes(buffer2, net_out));
            //printf("\n");
        }
    }
    
    if(ifa_list)
        freeifaddrs(ifa_list);

    return 0;
}
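The last_in/last_out bookkeeping in main() only becomes meaningful once the counters are read more than once, which is what the commented-out bandwidth printf hints at. A hedged sketch of such a polling loop, reusing the IFACE_CODE macro, the two-argument format_bytes() helper and the headers assumed by the example above (poll_bandwidth is an invented name):

/* Hypothetical polling loop: re-reads the interface counters once per second
 * and prints the per-second delta. Relies on the same IFACE_CODE macro and
 * two-argument format_bytes() helper as the example above; sleep() needs
 * <unistd.h>. */
static void poll_bandwidth(void)
{
    uint64_t last_in = 0, last_out = 0;

    for(;;)
    {
        struct ifaddrs *ifa_list = NULL, *ifa;

        if(getifaddrs(&ifa_list) == -1)
            break;

        for(ifa = ifa_list; ifa; ifa = ifa->ifa_next)
        {
            if(ifa->ifa_addr == NULL || AF_LINK != ifa->ifa_addr->sa_family)
                continue;
            if(ifa->ifa_data == NULL || strcmp(IFACE_CODE, ifa->ifa_name) != 0)
                continue;

            struct if_data *if_data = (struct if_data *)ifa->ifa_data;
            char buffer1[50], buffer2[50];

            /* Skip the first sample; there is no previous value to diff against. */
            if(last_in > 0)
                printf("Bandwidth: in = %s/s, out = %s/s\n",
                       format_bytes(buffer1, if_data->ifi_ibytes - last_in),
                       format_bytes(buffer2, if_data->ifi_obytes - last_out));

            last_in = if_data->ifi_ibytes;
            last_out = if_data->ifi_obytes;
        }

        freeifaddrs(ifa_list);
        sleep(1);
    }
}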
Example #5
/*
 * Prints the given number of bytes in a human-readable manner.
 */
static int print_bytes_human(char *outwalk, uint64_t bytes, const char *prefix_type) {
        if (strncmp(prefix_type, "decimal", strlen(prefix_type)) == 0) {
                return format_bytes(outwalk, bytes, DECIMAL_BASE, si_symbols);
        } else if (strncmp(prefix_type, "custom", strlen(prefix_type)) == 0) {
                return format_bytes(outwalk, bytes, BINARY_BASE, custom_symbols);
        } else {
                return format_bytes(outwalk, bytes, BINARY_BASE, iec_symbols);
        }
}
Example #6
void
cpu_startup_common(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);

#ifdef MULTIPROCESSOR
	kcpuset_create(&cpus_halted, true);
	KASSERT(cpus_halted != NULL);
	kcpuset_create(&cpus_hatched, true);
	KASSERT(cpus_hatched != NULL);
	kcpuset_create(&cpus_paused, true);
	KASSERT(cpus_paused != NULL);
	kcpuset_create(&cpus_resumed, true);
	KASSERT(cpus_resumed != NULL);
	kcpuset_create(&cpus_running, true);
	KASSERT(cpus_running != NULL);
	kcpuset_set(cpus_hatched, cpu_number());
	kcpuset_set(cpus_running, cpu_number());
#endif

	cpu_hwrena_setup();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

#if defined(__mips_n32)
	module_machine = "mips-n32";
#endif
}
Example #7
void
cpu_startup(void)
{
#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	vaddr_t		minaddr, maxaddr;
#endif
	char pbuf[9];

	/*
	 * Initialize error message buffer.
	 */
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 * Also call CPU init on systems that need that.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	if (dep_call->cpu_conf)
		(*dep_call->cpu_conf)();

	format_bytes(pbuf, sizeof(pbuf), avail_end);
	printf("total memory = %s\n", pbuf);
	panicstr = NULL;
	mtpr(AST_NO, PR_ASTLVL);
	spl0();

#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	minaddr = 0;

	/*
	 * Allocate a submap for physio.  This map effectively limits the
	 * number of processes doing physio at any one time.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, false, NULL);
#endif

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif

	iomap_ex_malloc_safe = 1;
}
Example #8
/*
 * cpu_startup: allocate memory for variable-sized tables.
 */
void
cpu_startup()
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
	extern void greeting __P((void));

	if (fputype != FPU_NONE)
		m68k_make_fpu_idle_frame();

	/*
	 * Initialize the kernel crash dump header.
	 */
	cpu_init_kcore_hdr();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Say "Hi" to the world
	 */
	greeting();
}
Example #9
int grow_machine_directory(void) {
        char buf[FORMAT_BYTES_MAX];
        struct statvfs a, b;
        uint64_t old_size, new_size, max_add;
        int r;

        /* Ensure the disk space data is accurate */
        sync_path("/var/lib/machines");
        sync_path("/var/lib/machines.raw");

        if (statvfs("/var/lib/machines.raw", &a) < 0)
                return -errno;

        if (statvfs("/var/lib/machines", &b) < 0)
                return -errno;

        /* Don't grow if not enough disk space is available on the host */
        if (((uint64_t) a.f_bavail * (uint64_t) a.f_bsize) <= VAR_LIB_MACHINES_FREE_MIN)
                return 0;

        /* Don't grow if at least one third of the fs is still free */
        if (b.f_bavail > b.f_blocks / 3)
                return 0;

        /* Calculate how much we are willing to add at most */
        max_add = ((uint64_t) a.f_bavail * (uint64_t) a.f_bsize) - VAR_LIB_MACHINES_FREE_MIN;

        /* Calculate the old size */
        old_size = (uint64_t) b.f_blocks * (uint64_t) b.f_bsize;

        /* Calculate the new size as three times the size of what is used right now */
        new_size = ((uint64_t) b.f_blocks - (uint64_t) b.f_bavail) * (uint64_t) b.f_bsize * 3;

        /* Always grow at least to the start size */
        if (new_size < VAR_LIB_MACHINES_SIZE_START)
                new_size = VAR_LIB_MACHINES_SIZE_START;

        /* If the new size is smaller than the old size, don't grow */
        if (new_size < old_size)
                return 0;

        /* Ensure we never add more than the maximum */
        if (new_size > old_size + max_add)
                new_size = old_size + max_add;

        r = btrfs_resize_loopback("/var/lib/machines", new_size, true);
        if (r <= 0)
                return r;

        /* Also bump the quota, of both the subvolume leaf qgroup, as
         * well as of any subtree quota group by the same id but a
         * higher level, if it exists. */
        (void) btrfs_qgroup_set_limit("/var/lib/machines", 0, new_size);
        (void) btrfs_subvol_set_subtree_quota_limit("/var/lib/machines", 0, new_size);

        log_info("Grew /var/lib/machines btrfs loopback file system to %s.", format_bytes(buf, sizeof(buf), new_size));
        return 1;
}
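To make the sizing rule above concrete with hypothetical numbers: if /var/lib/machines is a 90 GB filesystem with 70 GB used and 20 GB free (less than a third free, so growth proceeds) and the host filesystem still has 50 GB available, then old_size is 90 GB, the first candidate for new_size is 3 × 70 GB = 210 GB, and max_add is 50 GB minus VAR_LIB_MACHINES_FREE_MIN. Since 210 GB exceeds old_size + max_add, new_size is clamped to just under 140 GB before btrfs_resize_loopback() is called.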
Example #10
static const char *maybe_format_bytes(char *buf, size_t l, bool is_valid, uint64_t t) {
        if (!is_valid)
                return "-";
        if (arg_raw) {
                snprintf(buf, l, "%" PRIu64, t);
                return buf;
        }
        return format_bytes(buf, l, t);
}
Example #11
void
cfi_print(device_t self, struct cfi * const cfi)
{
	char pbuf[sizeof("XXXX MB")];
	struct cfi_query_data * const qryp = &cfi->cfi_qry_data;

	format_bytes(pbuf, sizeof(pbuf), 1 << qryp->device_size);
	if (cfi->cfi_emulated) {
		aprint_normal_dev(self, "%s NOR flash %s %s\n",
			cfi->cfi_name, pbuf,
			cfi_interface_desc_str(qryp->interface_code_desc));
	} else {
		aprint_normal_dev(self, "CFI NOR flash %s %s\n", pbuf,
			cfi_interface_desc_str(qryp->interface_code_desc));
	}
#ifdef NOR_VERBOSE
	aprint_normal_dev(self, "manufacturer id %#x, device id %#x %#x %#x\n",
		cfi->cfi_id_data.id_mid,
		cfi->cfi_id_data.id_did[0],
		cfi->cfi_id_data.id_did[1],
		cfi->cfi_id_data.id_did[2]);
	aprint_normal_dev(self, "x%u device operating in %u-bit mode\n",
		8 << cfi->cfi_portwidth, 8 << cfi->cfi_chipwidth);
	aprint_normal_dev(self, "sw bits lo=%#x hi=%#x\n",
		cfi->cfi_id_data.id_swb_lo,
		cfi->cfi_id_data.id_swb_hi);
	aprint_normal_dev(self, "max multibyte write size %d\n",
		1 << qryp->write_nbyte_size_max);
	aprint_normal_dev(self, "%d Erase Block Region(s)\n",
		qryp->erase_blk_regions);
	for (u_int r=0; r < qryp->erase_blk_regions; r++) {
		size_t sz = qryp->erase_blk_info[r].z * 256;
		format_bytes(pbuf, sizeof(pbuf), sz);
		aprint_normal("    %d: %d blocks, size %s\n", r,
			qryp->erase_blk_info[r].y + 1, pbuf);
	}
#endif

	switch (cfi->cfi_qry_data.id_pri) {
	case 0x0002:
		cfi_0002_print(self, cfi);
		break;
	}
}
Example #12
void
cpu_startup(void)
{
	extern int physmem;
	extern struct vm_map *mb_map;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
Example #13
static void test_physical_memory(void) {
        uint64_t p;
        char buf[FORMAT_BYTES_MAX];

        p = physical_memory();
        assert_se(p > 0);
        assert_se(p < UINT64_MAX);
        assert_se(p % page_size() == 0);

        log_info("Memory: %s (%" PRIu64 ")", format_bytes(buf, sizeof(buf), p), p);
}
Example #14
static void
ed_mca_attach(device_t parent, device_t self, void *aux)
{
    struct ed_softc *ed = device_private(self);
    struct edc_mca_softc *sc = device_private(parent);
    struct ed_attach_args *eda = aux;
    char pbuf[8];
    int drv_flags;

    ed->sc_dev = self;
    ed->edc_softc = sc;
    ed->sc_devno  = eda->edc_drive;
    edc_add_disk(sc, ed);

    bufq_alloc(&ed->sc_q, "disksort", BUFQ_SORT_RAWBLOCK);
    mutex_init(&ed->sc_q_lock, MUTEX_DEFAULT, IPL_VM);

    if (ed_get_params(ed, &drv_flags)) {
        printf(": IDENTIFY failed, no disk found\n");
        return;
    }

    format_bytes(pbuf, sizeof(pbuf),
                 (u_int64_t) ed->sc_capacity * DEV_BSIZE);
    printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
           pbuf,
           ed->cyl, ed->heads, ed->sectors,
           ed->sc_capacity);

    printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
           device_xname(ed->sc_dev), ed->spares,
           (drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
           (drv_flags & (1 << 1)) ? "Removable" : "Fixed",
           (drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
           (drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
           (drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
          );

    /*
     * Initialize and attach the disk structure.
     */
    disk_init(&ed->sc_dk, device_xname(ed->sc_dev), &eddkdriver);
    disk_attach(&ed->sc_dk);
    rnd_attach_source(&ed->rnd_source, device_xname(ed->sc_dev),
                      RND_TYPE_DISK, RND_FLAG_DEFAULT);

    ed->sc_flags |= EDF_INIT;

    /*
     * XXX We should try to discover wedges here, but
     * XXX that would mean being able to do I/O.  Should
     * XXX use config_defer() here.
     */
}
Example #15
void
sh_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
	const char *model = cpu_getmodel();

	printf("%s%s", copyright, version);
	if (*model != '\0')
		printf("%s", model);
#ifdef DEBUG
	printf("general exception handler:\t%d byte\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d byte\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
Example #16
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("SONY NET WORK STATION, Model %s, ", idrom.id_model);
	printf("Machine ID #%d\n", idrom.id_serial);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG to
	 * map those pages.
	 */

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
Example #17
void
cpu_startup(void)
{
	char pbuf[9];
	vaddr_t minaddr, maxaddr;
#ifdef DEBUG
	extern int pmapdebug;		/* XXX */
	int opmapdebug = pmapdebug;

	pmapdebug = 0;		/* Shut up pmap debug during bootstrap */
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_model);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG to
	 * map those pages.
	 */

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
Example #18
File: ld.c  Project: MarginC/kame
void
ldattach(struct ld_softc *sc)
{
	char buf[9];

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		printf("%s: disabled\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Initialise and attach the disk structure. */
	sc->sc_dk.dk_driver = &lddkdriver;
	sc->sc_dk.dk_name = sc->sc_dv.dv_xname;
	disk_attach(&sc->sc_dk);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry. */
	if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
		sc->sc_nheads = 16;
	else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
		sc->sc_nheads = 32;
	else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
		sc->sc_nheads = 64;
	else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
		sc->sc_nheads = 128;
	else
		sc->sc_nheads = 255;

	sc->sc_nsectors = 63;
	sc->sc_ncylinders = sc->sc_secperunit / 
	    (sc->sc_nheads * sc->sc_nsectors);

	format_bytes(buf, sizeof(buf), (u_int64_t)sc->sc_secperunit *
	    sc->sc_secsize);
	printf("%s: %s, %d cyl, %d head, %d sec, %d bytes/sect x %d sectors\n",
	    sc->sc_dv.dv_xname, buf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

#if NRND > 0
	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	/* Set the `shutdownhook'. */
	if (ld_sdh == NULL)
		ld_sdh = shutdownhook_establish(ldshutdown, NULL);
	BUFQ_INIT(&sc->sc_bufq);
}
Example #19
/*
 * This is called during pseudo-device attachment.
 */
void
md_attach_hook(int unit, struct md_conf *md)
{
	char pbuf[9];

	if (unit == 0 && md_is_root) {
		/* Setup root ramdisk */
		md->md_addr = (void *)md_root_image;
		md->md_size = (size_t)md_root_size;
		md->md_type = MD_KMEM_FIXED;
		format_bytes(pbuf, sizeof(pbuf), md->md_size);
		aprint_verbose("md%d: internal %s image area\n", unit, pbuf);
	}
}
Example #20
int main(int argc, char *argv[]) {
        char buf[CONST_MAX(FORMAT_TIMESPAN_MAX, FORMAT_BYTES_MAX)];
        nsec_t nsec;
        uint64_t v;
        int r;

        log_parse_environment();
        log_open();

        assert_se(procfs_cpu_get_usage(&nsec) >= 0);
        log_info("Current system CPU time: %s", format_timespan(buf, sizeof(buf), nsec/NSEC_PER_USEC, 1));

        assert_se(procfs_memory_get_current(&v) >= 0);
        log_info("Current memory usage: %s", format_bytes(buf, sizeof(buf), v));

        assert_se(procfs_tasks_get_current(&v) >= 0);
        log_info("Current number of tasks: %" PRIu64, v);

        assert_se(procfs_tasks_get_limit(&v) >= 0);
        log_info("Limit of tasks: %" PRIu64, v);
        assert_se(v > 0);
        assert_se(procfs_tasks_set_limit(v) >= 0);

        if (v > 100) {
                uint64_t w;
                r = procfs_tasks_set_limit(v-1);
                assert_se(IN_SET(r, 0, -EPERM, -EACCES, -EROFS));

                assert_se(procfs_tasks_get_limit(&w) >= 0);
                assert_se((r == 0 && w == v - 1) || (r < 0 && w == v));

                assert_se(procfs_tasks_set_limit(v) >= 0);

                assert_se(procfs_tasks_get_limit(&w) >= 0);
                assert_se(v == w);
        }

        return 0;
}
Example #21
void print_entry(struct dirent *entry, struct stat s)
{
  #define value_len 20
  static const char *time_fmt = "%F %T";
  static const char thick = '=';
  static const char thin = '-';
  static const int width = 72;
  static const int col = 12;

  print_border(thick, width, 0);
  print_row(entry->d_name, NULL, width, 0);
  print_border(thick, width, col);
  char size[value_len];
  format_bytes(size, value_len, s.st_size);
  print_row("Size", size, width, col);
  print_border(thin, width, col);
  char *type;
  switch (entry->d_type) {
    case DT_REG:
      type = "Regular file";
      break;
    case DT_LNK:
      type = "Symbolic link";
      break;
    default:
      type = "Unrecognized";
      break;
  }
  print_row("Type", type, width, col);
  print_border(thin, width, col);
  char mtime[value_len];
  strftime(mtime, value_len, time_fmt, localtime(&s.st_mtimespec.tv_sec));
  print_row("Modified", mtime, width, col);
  print_border(thin, width, col);
  char atime[value_len];
  strftime(atime, value_len, time_fmt, localtime(&s.st_atimespec.tv_sec));
  print_row("Accessed", atime, width, col);
  print_border(thick, width, 0);
}
Example #22
void ScopedPrinter::printBinaryImpl(StringRef Label, StringRef Str,
                                    ArrayRef<uint8_t> Data, bool Block,
                                    uint32_t StartOffset) {
  if (Data.size() > 16)
    Block = true;

  if (Block) {
    startLine() << Label;
    if (!Str.empty())
      OS << ": " << Str;
    OS << " (\n";
    if (!Data.empty())
      OS << format_bytes_with_ascii(Data, StartOffset, 16, 4,
                                    (IndentLevel + 1) * 2, true)
         << "\n";
    startLine() << ")\n";
  } else {
    startLine() << Label << ":";
    if (!Str.empty())
      OS << " " << Str;
    OS << " (" << format_bytes(Data, None, Data.size(), 1, 0, true) << ")\n";
  }
}
Example #23
int main(int argc, char *argv[]) {
        BtrfsQuotaInfo quota;
        int r, fd;

        fd = open("/", O_RDONLY|O_CLOEXEC|O_DIRECTORY);
        if (fd < 0)
                log_error_errno(errno, "Failed to open root directory: %m");
        else {
                char ts[FORMAT_TIMESTAMP_MAX], bs[FORMAT_BYTES_MAX];
                BtrfsSubvolInfo info;

                r = btrfs_subvol_get_info_fd(fd, 0, &info);
                if (r < 0)
                        log_error_errno(r, "Failed to get subvolume info: %m");
                else {
                        log_info("otime: %s", format_timestamp(ts, sizeof(ts), info.otime));
                        log_info("read-only (search): %s", yes_no(info.read_only));
                }

                r = btrfs_qgroup_get_quota_fd(fd, 0, &quota);
                if (r < 0)
                        log_error_errno(r, "Failed to get quota info: %m");
                else {
                        log_info("referenced: %s", strna(format_bytes(bs, sizeof(bs), quota.referenced)));
                        log_info("exclusive: %s", strna(format_bytes(bs, sizeof(bs), quota.exclusive)));
                        log_info("referenced_max: %s", strna(format_bytes(bs, sizeof(bs), quota.referenced_max)));
                        log_info("exclusive_max: %s", strna(format_bytes(bs, sizeof(bs), quota.exclusive_max)));
                }

                r = btrfs_subvol_get_read_only_fd(fd);
                if (r < 0)
                        log_error_errno(r, "Failed to get read only flag: %m");
                else
                        log_info("read-only (ioctl): %s", yes_no(r));

                safe_close(fd);
        }

        r = btrfs_subvol_make("/xxxtest");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        r = write_string_file("/xxxtest/afile", "ljsadhfljasdkfhlkjdsfha", WRITE_STRING_FILE_CREATE);
        if (r < 0)
                log_error_errno(r, "Failed to write file: %m");

        r = btrfs_subvol_snapshot("/xxxtest", "/xxxtest2", 0);
        if (r < 0)
                log_error_errno(r, "Failed to make snapshot: %m");

        r = btrfs_subvol_snapshot("/xxxtest", "/xxxtest3", BTRFS_SNAPSHOT_READ_ONLY);
        if (r < 0)
                log_error_errno(r, "Failed to make snapshot: %m");

        r = btrfs_subvol_remove("/xxxtest", BTRFS_REMOVE_QUOTA);
        if (r < 0)
                log_error_errno(r, "Failed to remove subvolume: %m");

        r = btrfs_subvol_remove("/xxxtest2", BTRFS_REMOVE_QUOTA);
        if (r < 0)
                log_error_errno(r, "Failed to remove subvolume: %m");

        r = btrfs_subvol_remove("/xxxtest3", BTRFS_REMOVE_QUOTA);
        if (r < 0)
                log_error_errno(r, "Failed to remove subvolume: %m");

        r = btrfs_subvol_snapshot("/etc", "/etc2", BTRFS_SNAPSHOT_READ_ONLY|BTRFS_SNAPSHOT_FALLBACK_COPY);
        if (r < 0)
                log_error_errno(r, "Failed to make snapshot: %m");

        r = btrfs_subvol_remove("/etc2", BTRFS_REMOVE_QUOTA);
        if (r < 0)
                log_error_errno(r, "Failed to remove subvolume: %m");

        r = btrfs_subvol_make("/xxxrectest");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        r = btrfs_subvol_make("/xxxrectest/xxxrectest2");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        r = btrfs_subvol_make("/xxxrectest/xxxrectest3");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        r = btrfs_subvol_make("/xxxrectest/xxxrectest3/sub");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        if (mkdir("/xxxrectest/dir", 0755) < 0)
                log_error_errno(errno, "Failed to make directory: %m");

        r = btrfs_subvol_make("/xxxrectest/dir/xxxrectest4");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        if (mkdir("/xxxrectest/dir/xxxrectest4/dir", 0755) < 0)
                log_error_errno(errno, "Failed to make directory: %m");

        r = btrfs_subvol_make("/xxxrectest/dir/xxxrectest4/dir/xxxrectest5");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        if (mkdir("/xxxrectest/mnt", 0755) < 0)
                log_error_errno(errno, "Failed to make directory: %m");

        r = btrfs_subvol_snapshot("/xxxrectest", "/xxxrectest2", BTRFS_SNAPSHOT_RECURSIVE);
        if (r < 0)
                log_error_errno(r, "Failed to snapshot subvolume: %m");

        r = btrfs_subvol_remove("/xxxrectest", BTRFS_REMOVE_QUOTA|BTRFS_REMOVE_RECURSIVE);
        if (r < 0)
                log_error_errno(r, "Failed to recursively remove subvolume: %m");

        r = btrfs_subvol_remove("/xxxrectest2", BTRFS_REMOVE_QUOTA|BTRFS_REMOVE_RECURSIVE);
        if (r < 0)
                log_error_errno(r, "Failed to recursively remove subvolume: %m");

        r = btrfs_subvol_make("/xxxquotatest");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        r = btrfs_subvol_auto_qgroup("/xxxquotatest", 0, true);
        if (r < 0)
                log_error_errno(r, "Failed to set up auto qgroup: %m");

        r = btrfs_subvol_make("/xxxquotatest/beneath");
        if (r < 0)
                log_error_errno(r, "Failed to make subvolume: %m");

        r = btrfs_subvol_auto_qgroup("/xxxquotatest/beneath", 0, false);
        if (r < 0)
                log_error_errno(r, "Failed to set up auto qgroup: %m");

        r = btrfs_qgroup_set_limit("/xxxquotatest/beneath", 0, 4ULL * 1024 * 1024 * 1024);
        if (r < 0)
                log_error_errno(r, "Failed to set up quota limit: %m");

        r = btrfs_subvol_set_subtree_quota_limit("/xxxquotatest", 0, 5ULL * 1024 * 1024 * 1024);
        if (r < 0)
                log_error_errno(r, "Failed to set up quota limit: %m");

        r = btrfs_subvol_snapshot("/xxxquotatest", "/xxxquotatest2", BTRFS_SNAPSHOT_RECURSIVE|BTRFS_SNAPSHOT_QUOTA);
        if (r < 0)
                log_error_errno(r, "Failed to setup snapshot: %m");

        r = btrfs_qgroup_get_quota("/xxxquotatest2/beneath", 0, &quota);
        if (r < 0)
                log_error_errno(r, "Failed to query quota: %m");

        assert_se(quota.referenced_max == 4ULL * 1024 * 1024 * 1024);

        r = btrfs_subvol_get_subtree_quota("/xxxquotatest2", 0, &quota);
        if (r < 0)
                log_error_errno(r, "Failed to query quota: %m");

        assert_se(quota.referenced_max == 5ULL * 1024 * 1024 * 1024);

        r = btrfs_subvol_remove("/xxxquotatest", BTRFS_REMOVE_QUOTA|BTRFS_REMOVE_RECURSIVE);
        if (r < 0)
                log_error_errno(r, "Failed remove subvolume: %m");

        r = btrfs_subvol_remove("/xxxquotatest2", BTRFS_REMOVE_QUOTA|BTRFS_REMOVE_RECURSIVE);
        if (r < 0)
                log_error_errno(r, "Failed remove subvolume: %m");

        return 0;
}
Example #24
void
init_x86_64(paddr_t first_avail)
{
	extern void consinit(void);
	extern struct extent *iomem_ex;
	struct region_descriptor region;
	struct mem_segment_descriptor *ldt_segp;
	int x, first16q, ist;
	u_int64_t seg_start, seg_end;
	u_int64_t seg_start1, seg_end1;

	cpu_init_msrs(&cpu_info_primary);

	proc0.p_addr = proc0paddr;
	cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;

	x86_bus_space_init();

	consinit();	/* XXX SHOULD NOT BE DONE HERE */

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

#if 0
	uvmexp.ncolors = 2;
#endif
 
	/*
	 * Boot arguments are in a single page specified by /boot.
	 *
	 * We require the "new" vector form, as well as memory ranges
	 * to be given in bytes rather than KB.
	 *
	 * locore copies the data into bootinfo[] for us.
	 */
	if ((bootapiver & (BAPIV_VECTOR | BAPIV_BMEMMAP)) ==
	    (BAPIV_VECTOR | BAPIV_BMEMMAP)) {
		if (bootinfo_size >= sizeof(bootinfo))
			panic("boot args too big");

		getbootinfo(bootinfo, bootinfo_size);
	} else
		panic("invalid /boot");

	avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */
				 /* and VM system doesn't work with phys 0 */
#ifdef MULTIPROCESSOR
	if (avail_start < MP_TRAMPOLINE + PAGE_SIZE)
		avail_start = MP_TRAMPOLINE + PAGE_SIZE;
#endif

	/*
	 * Call pmap initialization to make new kernel address space.
	 * We must do this before loading pages into the VM system.
	 */
	pmap_bootstrap(VM_MIN_KERNEL_ADDRESS,
	    IOM_END + trunc_page(KBTOB(biosextmem)));

	if (avail_start != PAGE_SIZE)
		pmap_prealloc_lowmem_ptps();

	if (mem_cluster_cnt == 0) {
		/*
		 * Allocate the physical addresses used by RAM from the iomem
		 * extent map.  This is done before the addresses are
		 * page rounded just to make sure we get them all.
		 */
		if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
		mem_clusters[0].start = 0;
		mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
		physmem += atop(mem_clusters[0].size);
		if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
#if 0
#if NISADMA > 0
		/*
		 * Some motherboards/BIOSes remap the 384K of RAM that would
		 * normally be covered by the ISA hole to the end of memory
		 * so that it can be used.  However, on a 16M system, this
		 * would cause bounce buffers to be allocated and used.
		 * This is not desirable behaviour, as more than 384K of
		 * bounce buffers might be allocated.  As a work-around,
		 * we round memory down to the nearest 1M boundary if
		 * we're using any isadma devices and the remapped memory
		 * is what puts us over 16M.
		 */
		if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
			char pbuf[9];

			format_bytes(pbuf, sizeof(pbuf),
			    biosextmem - (15*1024));
			printf("Warning: ignoring %s of remapped memory\n",
			    pbuf);
			biosextmem = (15*1024);
		}
#endif
#endif
		mem_clusters[1].start = IOM_END;
		mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
		physmem += atop(mem_clusters[1].size);

		mem_cluster_cnt = 2;

		avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
	}

	/*
	 * If we have 16M of RAM or less, just put it all on
	 * the default free list.  Otherwise, put the first
	 * 16M of RAM on a lower priority free list (so that
	 * all of the ISA DMA'able memory won't be eaten up
	 * first-off).
	 */
	if (avail_end <= (16 * 1024 * 1024))
		first16q = VM_FREELIST_DEFAULT;
	else
		first16q = VM_FREELIST_FIRST16;

	/* Make sure the end of the space used by the kernel is rounded. */
	first_avail = round_page(first_avail);
	kern_end = KERNBASE + first_avail;

	/*
	 * Now, load the memory clusters (which have already been
	 * rounded and truncated) into the VM system.
	 *
	 * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
	 * IS LOADED AT IOM_END (1M).
	 */
	for (x = 0; x < mem_cluster_cnt; x++) {
		seg_start = mem_clusters[x].start;
		seg_end = mem_clusters[x].start + mem_clusters[x].size;
		seg_start1 = 0;
		seg_end1 = 0;

		if (seg_start > 0xffffffffULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - seg_start);
			continue;
		}
		if (seg_end > 0x100000000ULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - 0x100000000ULL);
			seg_end = 0x100000000ULL;
		}

		/*
		 * Skip memory before our available starting point.
		 */
		if (seg_end <= avail_start)
			continue;

		if (avail_start >= seg_start && avail_start < seg_end) {
			if (seg_start != 0)
				panic("init_x86_64: memory doesn't start at 0");
			seg_start = avail_start;
			if (seg_start == seg_end)
				continue;
		}

		/*
		 * If this segment contains the kernel, split it
		 * in two, around the kernel.
		 */
		if (seg_start <= IOM_END && first_avail <= seg_end) {
			seg_start1 = first_avail;
			seg_end1 = seg_end;
			seg_end = IOM_END;
		}

		/* First hunk */
		if (seg_start != seg_end) {
			if (seg_start <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)tmp,
				    atop(seg_start), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(tmp), atop(seg_start),
				    atop(tmp), first16q);
				seg_start = tmp;
			}

			if (seg_start != seg_end) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)seg_end,
				    atop(seg_start), atop(seg_end));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(seg_end), atop(seg_start),
				    atop(seg_end), VM_FREELIST_DEFAULT);
			}
		}

		/* Second hunk */
		if (seg_start1 != seg_end1) {
			if (seg_start1 <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end1 > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end1;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)tmp,
				    atop(seg_start1), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(tmp), atop(seg_start1),
				    atop(tmp), first16q);
				seg_start1 = tmp;
			}

			if (seg_start1 != seg_end1) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)seg_end1,
				    atop(seg_start1), atop(seg_end1));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(seg_end1), atop(seg_start1),
				    atop(seg_end1), VM_FREELIST_DEFAULT);
			}
		}
	}

	/*
	 * Steal memory for the message buffer (at end of core).
	 */
	{
		struct vm_physseg *vps = NULL;
		psize_t sz = round_page(MSGBUFSIZE);
		psize_t reqsz = sz;

		for (x = 0; x < vm_nphysseg; x++) {
			vps = &vm_physmem[x];
			if (ptoa(vps->avail_end) == avail_end)
				break;
		}
		if (x == vm_nphysseg)
			panic("init_x86_64: can't find end of memory");

		/* Shrink so it'll fit in the last segment. */
		if ((vps->avail_end - vps->avail_start) < atop(sz))
			sz = ptoa(vps->avail_end - vps->avail_start);

		vps->avail_end -= atop(sz);
		vps->end -= atop(sz);
		msgbuf_paddr = ptoa(vps->avail_end);

		/* Remove the last segment if it now has no pages. */
		if (vps->start == vps->end) {
			for (vm_nphysseg--; x < vm_nphysseg; x++)
				vm_physmem[x] = vm_physmem[x + 1];
		}

		/* Now find where the new avail_end is. */
		for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
			if (vm_physmem[x].avail_end > avail_end)
				avail_end = vm_physmem[x].avail_end;
		avail_end = ptoa(avail_end);

		/* Warn if the message buffer had to be shrunk. */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);
	}

	/*
	 * XXXfvdl todo: acpi wakeup code.
	 */

	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);

	pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE);
	pmap_kenter_pa(idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);

	pmap_kenter_pa(lo32_vaddr, lo32_paddr, VM_PROT_READ|VM_PROT_WRITE);

	idt = (struct gate_descriptor *)idt_vaddr;
	gdtstore = (char *)(idt + NIDT);
	ldtstore = gdtstore + DYNSEL_START;

	/* make gdt gates and memory segments */
	set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0, 0xfffff, SDT_MEMERA,
	    SEL_KPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0, 0xfffff, SDT_MEMRWA,
	    SEL_KPL, 1, 0, 1);

	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore, LDT_SIZE - 1,
	    SDT_SYSLDT, SEL_KPL, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1);

	/* make ldt gates and memory segments */
	setgate((struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    &IDTVEC(oosyscall), 0, SDT_SYS386CGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	*(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUCODE_SEL);
	*(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUDATA_SEL);

	/*
	 * 32 bit GDT entries.
	 */

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * 32 bit LDT entries.
	 */
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1, 0);
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * Other entries.
	 */
	memcpy((struct gate_descriptor *)(ldtstore + LSOL26CALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));
	memcpy((struct gate_descriptor *)(ldtstore + LBSDICALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));

	/* exceptions */
	for (x = 0; x < 32; x++) {
		ist = (x == 8) ? 1 : 0;
		setgate(&idt[x], IDTVEC(exceptions)[x], ist, SDT_SYS386IGT,
		    (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
		idt_allocmap[x] = 1;
	}

	/* new-style interrupt gate for syscalls */
	setgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	idt_allocmap[128] = 1;

	setregion(&region, gdtstore, DYNSEL_START - 1);
	lgdt(&region);

	cpu_init_idt();

#ifdef DDB
	db_machine_init();
	ddb_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	kgdb_port_init();
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

	intr_default_setup();

	softintr_init();
	splraise(IPL_IPI);
	enable_intr();

	/* Make sure maxproc is sane */
	if (maxproc > cpu_maxproc())
		maxproc = cpu_maxproc();
}
Example #25
static int display(Hashmap *a) {
        Iterator i;
        Group *g;
        Group **array;
        signed path_columns;
        unsigned rows, n = 0, j, maxtcpu = 0, maxtpath = 0;
        char buffer[MAX3(21, FORMAT_BYTES_MAX, FORMAT_TIMESPAN_MAX)];

        assert(a);

        /* Set cursor to top left corner and clear screen */
        if (on_tty())
                fputs("\033[H"
                      "\033[2J", stdout);

        array = alloca(sizeof(Group*) * hashmap_size(a));

        HASHMAP_FOREACH(g, a, i)
                if (g->n_tasks_valid || g->cpu_valid || g->memory_valid || g->io_valid)
                        array[n++] = g;

        qsort_safe(array, n, sizeof(Group*), group_compare);

        /* Find the longest names in one run */
        for (j = 0; j < n; j++) {
                unsigned cputlen, pathtlen;

                format_timespan(buffer, sizeof(buffer), (nsec_t) (array[j]->cpu_usage / NSEC_PER_USEC), 0);
                cputlen = strlen(buffer);
                maxtcpu = MAX(maxtcpu, cputlen);
                pathtlen = strlen(array[j]->path);
                maxtpath = MAX(maxtpath, pathtlen);
        }

        if (arg_cpu_type == CPU_PERCENT)
                snprintf(buffer, sizeof(buffer), "%6s", "%CPU");
        else
                snprintf(buffer, sizeof(buffer), "%*s", maxtcpu, "CPU Time");

        rows = lines();
        if (rows <= 10)
                rows = 10;

        if (on_tty()) {
                path_columns = columns() - 36 - strlen(buffer);
                if (path_columns < 10)
                        path_columns = 10;

                printf("%s%-*s%s %s%7s%s %s%s%s %s%8s%s %s%8s%s %s%8s%s\n\n",
                       arg_order == ORDER_PATH ? ON : "", path_columns, "Path",
                       arg_order == ORDER_PATH ? OFF : "",
                       arg_order == ORDER_TASKS ? ON : "", "Tasks",
                       arg_order == ORDER_TASKS ? OFF : "",
                       arg_order == ORDER_CPU ? ON : "", buffer,
                       arg_order == ORDER_CPU ? OFF : "",
                       arg_order == ORDER_MEMORY ? ON : "", "Memory",
                       arg_order == ORDER_MEMORY ? OFF : "",
                       arg_order == ORDER_IO ? ON : "", "Input/s",
                       arg_order == ORDER_IO ? OFF : "",
                       arg_order == ORDER_IO ? ON : "", "Output/s",
                       arg_order == ORDER_IO ? OFF : "");
        } else
                path_columns = maxtpath;

        for (j = 0; j < n; j++) {
                char *p;

                if (on_tty() && j + 5 > rows)
                        break;

                g = array[j];

                p = ellipsize(g->path, path_columns, 33);
                printf("%-*s", path_columns, p ? p : g->path);
                free(p);

                if (g->n_tasks_valid)
                        printf(" %7u", g->n_tasks);
                else
                        fputs("       -", stdout);

                if (arg_cpu_type == CPU_PERCENT) {
                        if (g->cpu_valid)
                                printf(" %6.1f", g->cpu_fraction*100);
                        else
                                fputs("      -", stdout);
                } else
                        printf(" %*s", maxtcpu, format_timespan(buffer, sizeof(buffer), (nsec_t) (g->cpu_usage / NSEC_PER_USEC), 0));

                if (g->memory_valid)
                        printf(" %8s", format_bytes(buffer, sizeof(buffer), g->memory));
                else
                        fputs("        -", stdout);

                if (g->io_valid) {
                        printf(" %8s",
                               format_bytes(buffer, sizeof(buffer), g->io_input_bps));
                        printf(" %8s",
                               format_bytes(buffer, sizeof(buffer), g->io_output_bps));
                } else
                        fputs("        -        -", stdout);

                putchar('\n');
        }

        return 0;
}
Example #26
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 */
void
cpu_startup(void)
{
	u_quad_t vmememsize;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
	u_int i;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * If we have an FPU, initialise the cached idle frame
	 */
	if (fputype != FPU_NONE)
		m68k_make_fpu_idle_frame();

	/*
	 * Initialize the kernel crash dump header.
	 */
	cpu_init_kcore_hdr();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s", pbuf);

	for (vmememsize = 0, i = 1; i < mem_cluster_cnt; i++)
		vmememsize += mem_clusters[i].size;
	if (vmememsize != 0) {
		format_bytes(pbuf, sizeof(pbuf), mem_clusters[0].size);
		printf(" (%s on-board", pbuf);
		format_bytes(pbuf, sizeof(pbuf), vmememsize);
		printf(", %s VMEbus)", pbuf);
	}

	printf("\n");

	minaddr = 0;
	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, false, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();
}
Example #27
int journal_directory_vacuum(
                const char *directory,
                uint64_t max_use,
                uint64_t n_max_files,
                usec_t max_retention_usec,
                usec_t *oldest_usec,
                bool verbose) {

        _cleanup_closedir_ DIR *d = NULL;
        struct vacuum_info *list = NULL;
        unsigned n_list = 0, i, n_active_files = 0;
        size_t n_allocated = 0;
        uint64_t sum = 0, freed = 0;
        usec_t retention_limit = 0;
        char sbytes[FORMAT_BYTES_MAX];
        struct dirent *de;
        int r;

        assert(directory);

        if (max_use <= 0 && max_retention_usec <= 0 && n_max_files <= 0)
                return 0;

        if (max_retention_usec > 0) {
                retention_limit = now(CLOCK_REALTIME);
                if (retention_limit > max_retention_usec)
                        retention_limit -= max_retention_usec;
                else
                        max_retention_usec = retention_limit = 0;
        }

        d = opendir(directory);
        if (!d)
                return -errno;

        FOREACH_DIRENT_ALL(de, d, r = -errno; goto finish) {

                unsigned long long seqnum = 0, realtime;
                _cleanup_free_ char *p = NULL;
                sd_id128_t seqnum_id;
                bool have_seqnum;
                uint64_t size;
                struct stat st;
                size_t q;

                if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
                        log_debug_errno(errno, "Failed to stat file %s while vacuuming, ignoring: %m", de->d_name);
                        continue;
                }

                if (!S_ISREG(st.st_mode))
                        continue;

                q = strlen(de->d_name);

                if (endswith(de->d_name, ".journal")) {

                        /* Vacuum archived files. Active files are
                         * left around */

                        if (q < 1 + 32 + 1 + 16 + 1 + 16 + 8) {
                                n_active_files++;
                                continue;
                        }

                        if (de->d_name[q-8-16-1] != '-' ||
                            de->d_name[q-8-16-1-16-1] != '-' ||
                            de->d_name[q-8-16-1-16-1-32-1] != '@') {
                                n_active_files++;
                                continue;
                        }

                        p = strdup(de->d_name);
                        if (!p) {
                                r = -ENOMEM;
                                goto finish;
                        }

                        de->d_name[q-8-16-1-16-1] = 0;
                        if (sd_id128_from_string(de->d_name + q-8-16-1-16-1-32, &seqnum_id) < 0) {
                                n_active_files++;
                                continue;
                        }

                        if (sscanf(de->d_name + q-8-16-1-16, "%16llx-%16llx.journal", &seqnum, &realtime) != 2) {
                                n_active_files++;
                                continue;
                        }

                        have_seqnum = true;

                } else if (endswith(de->d_name, ".journal~")) {
                        unsigned long long tmp;

                        /* Vacuum corrupted files */

                        if (q < 1 + 16 + 1 + 16 + 8 + 1) {
                                n_active_files ++;
                                continue;
                        }

                        if (de->d_name[q-1-8-16-1] != '-' ||
                            de->d_name[q-1-8-16-1-16-1] != '@') {
                                n_active_files ++;
                                continue;
                        }

                        p = strdup(de->d_name);
                        if (!p) {
                                r = -ENOMEM;
                                goto finish;
                        }

                        if (sscanf(de->d_name + q-1-8-16-1-16, "%16llx-%16llx.journal~", &realtime, &tmp) != 2) {
                                n_active_files ++;
                                continue;
                        }

                        have_seqnum = false;
                } else {
                        /* We do not vacuum unknown files! */
                        log_debug("Not vacuuming unknown file %s.", de->d_name);
                        continue;
                }

                size = 512UL * (uint64_t) st.st_blocks;

                r = journal_file_empty(dirfd(d), p);
                if (r < 0) {
                        log_debug_errno(r, "Failed check if %s is empty, ignoring: %m", p);
                        continue;
                }
                if (r > 0) {
                        /* Always vacuum empty non-online files. */

                        if (unlinkat(dirfd(d), p, 0) >= 0) {

                                log_full(verbose ? LOG_INFO : LOG_DEBUG,
                                         "Deleted empty archived journal %s/%s (%s).", directory, p, format_bytes(sbytes, sizeof(sbytes), size));

                                freed += size;
                        } else if (errno != ENOENT)
                                log_warning_errno(errno, "Failed to delete empty archived journal %s/%s: %m", directory, p);

                        continue;
                }

                patch_realtime(dirfd(d), p, &st, &realtime);

                if (!GREEDY_REALLOC(list, n_allocated, n_list + 1)) {
                        r = -ENOMEM;
                        goto finish;
                }

                list[n_list].filename = p;
                list[n_list].usage = size;
                list[n_list].seqnum = seqnum;
                list[n_list].realtime = realtime;
                list[n_list].seqnum_id = seqnum_id;
                list[n_list].have_seqnum = have_seqnum;
                n_list ++;

                p = NULL;
                sum += size;
        }

        qsort_safe(list, n_list, sizeof(struct vacuum_info), vacuum_compare);

        for (i = 0; i < n_list; i++) {
                unsigned left;

                left = n_active_files + n_list - i;

                if ((max_retention_usec <= 0 || list[i].realtime >= retention_limit) &&
                    (max_use <= 0 || sum <= max_use) &&
                    (n_max_files <= 0 || left <= n_max_files))
                        break;

                if (unlinkat(dirfd(d), list[i].filename, 0) >= 0) {
                        log_full(verbose ? LOG_INFO : LOG_DEBUG, "Deleted archived journal %s/%s (%s).", directory, list[i].filename, format_bytes(sbytes, sizeof(sbytes), list[i].usage));
                        freed += list[i].usage;

                        if (list[i].usage < sum)
                                sum -= list[i].usage;
                        else
                                sum = 0;

                } else if (errno != ENOENT)
                        log_warning_errno(errno, "Failed to delete archived journal %s/%s: %m", directory, list[i].filename);
        }

        if (oldest_usec && i < n_list && (*oldest_usec == 0 || list[i].realtime < *oldest_usec))
                *oldest_usec = list[i].realtime;

        r = 0;

finish:
        for (i = 0; i < n_list; i++)
                free(list[i].filename);
        free(list);

        log_full(verbose ? LOG_INFO : LOG_DEBUG, "Vacuuming done, freed %s of archived journals on disk.", format_bytes(sbytes, sizeof(sbytes), freed));

        return r;
}
Example #28
int setup_machine_directory(uint64_t size, sd_bus_error *error) {
        _cleanup_release_lock_file_ LockFile lock_file = LOCK_FILE_INIT;
        struct loop_info64 info = {
                .lo_flags = LO_FLAGS_AUTOCLEAR,
        };
        _cleanup_close_ int fd = -1, control = -1, loop = -1;
        _cleanup_free_ char* loopdev = NULL;
        char tmpdir[] = "/tmp/machine-pool.XXXXXX", *mntdir = NULL;
        bool tmpdir_made = false, mntdir_made = false, mntdir_mounted = false;
        char buf[FORMAT_BYTES_MAX];
        int r, nr = -1;

        /* btrfs cannot handle file systems < 16M, hence use this as minimum */
        if (size == (uint64_t) -1)
                size = VAR_LIB_MACHINES_SIZE_START;
        else if (size < 16*1024*1024)
                size = 16*1024*1024;

        /* Make sure we only set the directory up once at a time */
        r = make_lock_file("/run/systemd/machines.lock", LOCK_EX, &lock_file);
        if (r < 0)
                return r;

        r = check_btrfs();
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether /var/lib/machines is located on btrfs: %m");
        if (r > 0) {
                (void) btrfs_subvol_make_label("/var/lib/machines");

                r = btrfs_quota_enable("/var/lib/machines", true);
                if (r < 0)
                        log_warning_errno(r, "Failed to enable quota for /var/lib/machines, ignoring: %m");

                r = btrfs_subvol_auto_qgroup("/var/lib/machines", 0, true);
                if (r < 0)
                        log_warning_errno(r, "Failed to set up default quota hierarchy for /var/lib/machines, ignoring: %m");

                return 1;
        }

        if (path_is_mount_point("/var/lib/machines", AT_SYMLINK_FOLLOW) > 0) {
                log_debug("/var/lib/machines is already a mount point, not creating loopback file for it.");
                return 0;
        }

        r = dir_is_populated("/var/lib/machines");
        if (r < 0 && r != -ENOENT)
                return r;
        if (r > 0) {
                log_debug("/var/log/machines is already populated, not creating loopback file for it.");
                return 0;
        }

        r = mkfs_exists("btrfs");
        if (r == -ENOENT) {
                log_debug("mkfs.btrfs is missing, cannot create loopback file for /var/lib/machines.");
                return 0;
        }
        if (r < 0)
                return r;

        fd = setup_machine_raw(size, error);
        if (fd < 0)
                return fd;

        control = open("/dev/loop-control", O_RDWR|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
        if (control < 0)
                return sd_bus_error_set_errnof(error, errno, "Failed to open /dev/loop-control: %m");

        nr = ioctl(control, LOOP_CTL_GET_FREE);
        if (nr < 0)
                return sd_bus_error_set_errnof(error, errno, "Failed to allocate loop device: %m");

        if (asprintf(&loopdev, "/dev/loop%i", nr) < 0) {
                r = -ENOMEM;
                goto fail;
        }

        loop = open(loopdev, O_CLOEXEC|O_RDWR|O_NOCTTY|O_NONBLOCK);
        if (loop < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to open loopback device: %m");
                goto fail;
        }

        if (ioctl(loop, LOOP_SET_FD, fd) < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to bind loopback device: %m");
                goto fail;
        }

        if (ioctl(loop, LOOP_SET_STATUS64, &info) < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to enable auto-clear for loopback device: %m");
                goto fail;
        }

        /* We need to make sure the new /var/lib/machines directory
         * has an access mode of 0700 at the time it is first made
         * available. mkfs will create it with 0755 however. Hence,
         * let's mount the directory into an inaccessible directory
         * below /tmp first, fix the access mode, and move it to the
         * public place then. */

        if (!mkdtemp(tmpdir)) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to create temporary mount parent directory: %m");
                goto fail;
        }
        tmpdir_made = true;

        mntdir = strjoina(tmpdir, "/mnt");
        if (mkdir(mntdir, 0700) < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to create temporary mount directory: %m");
                goto fail;
        }
        mntdir_made = true;

        if (mount(loopdev, mntdir, "btrfs", 0, NULL) < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to mount loopback device: %m");
                goto fail;
        }
        mntdir_mounted = true;

        r = btrfs_quota_enable(mntdir, true);
        if (r < 0)
                log_warning_errno(r, "Failed to enable quota, ignoring: %m");

        r = btrfs_subvol_auto_qgroup(mntdir, 0, true);
        if (r < 0)
                log_warning_errno(r, "Failed to set up default quota hierarchy, ignoring: %m");

        if (chmod(mntdir, 0700) < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to fix owner: %m");
                goto fail;
        }

        (void) mkdir_p_label("/var/lib/machines", 0700);

        if (mount(mntdir, "/var/lib/machines", NULL, MS_BIND, NULL) < 0) {
                r = sd_bus_error_set_errnof(error, errno, "Failed to mount directory into right place: %m");
                goto fail;
        }

        (void) syncfs(fd);

        log_info("Set up /var/lib/machines as btrfs loopback file system of size %s mounted on /var/lib/machines.raw.", format_bytes(buf, sizeof(buf), size));

        (void) umount2(mntdir, MNT_DETACH);
        (void) rmdir(mntdir);
        (void) rmdir(tmpdir);

        return 1;

fail:
        if (mntdir_mounted)
                (void) umount2(mntdir, MNT_DETACH);

        if (mntdir_made)
                (void) rmdir(mntdir);
        if (tmpdir_made)
                (void) rmdir(tmpdir);

        if (loop >= 0) {
                (void) ioctl(loop, LOOP_CLR_FD);
                loop = safe_close(loop);
        }

        if (control >= 0 && nr >= 0)
                (void) ioctl(control, LOOP_CTL_REMOVE, nr);

        return r;
}
Beispiel #29
0
/* ARGSUSED */
void
flash_attach(device_t parent, device_t self, void *aux)
{
	struct flash_softc * const sc = device_private(self);
	struct flash_attach_args * const faa = aux;
	char pbuf[2][sizeof("9999 KB")];

	sc->sc_dev = self;
	sc->sc_parent_dev = parent;
	sc->flash_if = faa->flash_if;
	sc->sc_partinfo = faa->partinfo;
	sc->hw_softc = device_private(parent);

	format_bytes(pbuf[0], sizeof(pbuf[0]), sc->sc_partinfo.part_size);
	format_bytes(pbuf[1], sizeof(pbuf[1]), sc->flash_if->erasesize);

	aprint_naive("\n");

	switch (sc->flash_if->type) {
	case FLASH_TYPE_NOR:
		aprint_normal(": NOR flash partition size %s, offset %#jx",
			pbuf[0], (uintmax_t)sc->sc_partinfo.part_offset);
		break;

	case FLASH_TYPE_NAND:
		aprint_normal(": NAND flash partition size %s, offset %#jx",
			pbuf[0], (uintmax_t)sc->sc_partinfo.part_offset);
		break;

	default:
		aprint_normal(": %s unknown flash", pbuf[0]);
	}

	if (sc->sc_partinfo.part_flags & FLASH_PART_READONLY) {
		sc->sc_readonly = true;
		aprint_normal(", read only");
	} else {
		sc->sc_readonly = false;
	}

	aprint_normal("\n");

	if (sc->sc_partinfo.part_size == 0) {
		aprint_error_dev(self,
		    "partition size must be larger than 0\n");
		return;
	}

	switch (sc->flash_if->type) {
	case FLASH_TYPE_NOR:
		aprint_normal_dev(sc->sc_dev,
		    "erase size %s bytes, write size %d bytes\n",
		    pbuf[1], sc->flash_if->writesize);
		break;

	case FLASH_TYPE_NAND:
	default:
		aprint_normal_dev(sc->sc_dev,
		    "erase size %s, page size %d bytes, write size %d bytes\n",
		    pbuf[1], sc->flash_if->page_size,
		    sc->flash_if->writesize);
		break;
	}

	if (!pmf_device_register1(sc->sc_dev, NULL, NULL, flash_shutdown))
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
}
Beispiel #30
0
/*
 * Allocate memory for variable-sized tables,
 */
void
cpu_startup(void)
{
	unsigned i;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s", version);
	printf("%s\n", cpu_model);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("%s memory", pbuf);

	/*
	 * Allocate virtual address space for file I/O buffers.
	 * Note they are different than the array of headers, 'buf',
	 * and usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
		UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base + 1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf(", %s free", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf(", %s in %d buffers\n", pbuf, nbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
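The buffer-cache setup above distributes bufpages across nbuf buffers with base = bufpages / nbuf and residual = bufpages % nbuf, so the first residual buffers get base + 1 pages and the rest get base, handing out exactly bufpages in total. The standalone sketch below (arbitrary example values, not kernel state) just checks that arithmetic.

#include <stdio.h>

int main(void) {
        /* Example values only; in the kernel these come from bufpages and nbuf. */
        unsigned bufpages = 1234, nbuf = 100;
        unsigned base = bufpages / nbuf;        /* pages every buffer gets     */
        unsigned residual = bufpages % nbuf;    /* buffers that get one extra  */
        unsigned i, total = 0;

        for (i = 0; i < nbuf; i++)
                total += (i < residual) ? base + 1 : base;

        printf("base=%u residual=%u total=%u (expected %u)\n",
            base, residual, total, bufpages);
        return 0;
}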