uint32_t av_get_random_seed(void)
{
    uint32_t seed;

#if HAVE_CRYPTGENRANDOM
    HCRYPTPROV provider;
    if (CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
                            CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) {
        BOOL ret = CryptGenRandom(provider, sizeof(seed), (PBYTE) &seed);
        CryptReleaseContext(provider, 0);
        if (ret)
            return seed;
    }
#endif

#if HAVE_ARC4RANDOM
    return arc4random();
#endif

    if (read_random(&seed, "/dev/urandom") == sizeof(seed))
        return seed;
    if (read_random(&seed, "/dev/random") == sizeof(seed))
        return seed;
    return get_generic_seed();
}
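The fallback chain above degrades gracefully from platform CSPRNGs down to a generic seed; callers treat the result as a PRNG seed, not as key material. A minimal usage sketch, assuming FFmpeg's libavutil (where av_get_random_seed() is declared in libavutil/random_seed.h):

#include <stdio.h>
#include <stdlib.h>
#include <libavutil/random_seed.h>   /* declares av_get_random_seed() */

int main(void)
{
    /* Seed a non-cryptographic PRNG; the seed itself comes from the
     * best entropy source the platform offers (CryptGenRandom,
     * arc4random, /dev/urandom, ...). */
    srand(av_get_random_seed());
    printf("%d\n", rand());
    return 0;
}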
uint32_t rtp_ssrc(void)
{
    uint32_t seed;

    if (read_random(&seed, "/dev/urandom") == sizeof(seed))
        return seed;
    if (read_random(&seed, "/dev/random") == sizeof(seed))
        return seed;
    return (uint32_t)rand();
}
uint32_t av_get_random_seed(void)
{
    uint32_t seed;

    if (read_random(&seed, "/dev/urandom") == sizeof(seed))
        return seed;
    if (read_random(&seed, "/dev/random") == sizeof(seed))
        return seed;
    return get_generic_seed();
}
uint32_t av_get_random_seed(void)
{
    uint32_t seed;

#ifndef _XBOX   /* the Xbox port has no /dev/urandom or /dev/random */
    if (read_random(&seed, "/dev/urandom") == sizeof(seed))
        return seed;
    if (read_random(&seed, "/dev/random") == sizeof(seed))
        return seed;
    return get_generic_seed();
#else
    return 0;
#endif
}
int test_case2()
{
    char *file_names[] = {
        "/home/guoqiang/code/0.0.2/MediaSvr/02A805D023585316849992F61018FD8A360239D6.dat",
        "/home/guoqiang/code/NewMediaServer/new_ms/MediaSvr/02A805D023585316849992F61018FD8A360239D6.dat",
        "/home/guoqiang/code/NewMediaServer/new_ms/MediaSvr/14C39AB98D3205AC196308FAA27B3485BCB7109C.dat",
        "/home/guoqiang/code/NewMediaServer/new_ms1/MediaSvr/02A805D023585316849992F61018FD8A360239D6.dat",
        "/home/guoqiang/code/NewMediaServer/new_ms1/MediaSvr/C102015F1FD646DC1534F8244C4C4A6A2AF0C425.dat",
        "/home/guoqiang/code/NewMediaServer/new_ms1/MediaSvr/14C39AB98D3205AC196308FAA27B3485BCB7109C.dat",
        "/home/guoqiang/code/NewMediaServer/xiongm/MediaSvr/02A805D023585316849992F61018FD8A360239D6.dat",
        "/home/guoqiang/code/NewMediaServer/xiongm/MediaSvr/C102015F1FD646DC1534F8244C4C4A6A2AF0C425.dat",
        "/home/guoqiang/code/NewMediaServer/xiongm/MediaSvr/14C39AB98D3205AC196308FAA27B3485BCB7109C.dat",
        "/home/html/02A805D023585316849992F61018FD8A360239D6.dat",
        "/home/html/C102015F1FD646DC1534F8244C4C4A6A2AF0C425.dat",
        "/home/html/B421F1690EDF4115D39A0531770FF1D46B57F9FC.dat",
    };
    int index = 0;

    for (index = 0; index < (int)(sizeof(file_names) / sizeof(file_names[0])); index++) {
        read_random(file_names[index]);
    }
    return 0;
}
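This test drives a read_random() helper that takes only a file path; its definition is not part of the listing. A minimal sketch of what such a helper might look like (prototype and behavior are assumptions, not the project's actual code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical: read the .dat file in fixed-size chunks at pseudo-random
 * offsets, discarding the data; returns 0 on success, -1 on open failure. */
static int read_random(const char *path)
{
    FILE *fp = fopen(path, "rb");
    char buf[4096];
    long size;
    int i;

    if (!fp)
        return -1;
    fseek(fp, 0, SEEK_END);
    size = ftell(fp);
    for (i = 0; i < 128 && size > (long)sizeof(buf); i++) {
        fseek(fp, rand() % (size - (long)sizeof(buf)), SEEK_SET);
        if (fread(buf, 1, sizeof(buf), fp) != sizeof(buf))
            break;
    }
    fclose(fp);
    return 0;
}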
double experiment(qint64 len, int fdRef, int fdMiss)
{
    std::vector<int> v(len);
    std::vector<int> vRand(len);
    int samples = 10;
    double rate = 0.0;

    for (int i = 0; i < samples; i++) {
        long vRefI, vMissI, vRefF, vMissF;
        int ret = 0;

        write_linear(v);
        write_random(vRand);

        read_linear(v);
        ret = read(fdRef, &vRefI, sizeof(vRefI));
        ret = read(fdMiss, &vMissI, sizeof(vMissI));

        read_random(v, vRand);
        ret = read(fdRef, &vRefF, sizeof(vRefF));
        ret = read(fdMiss, &vMissF, sizeof(vMissF));

        vMissF -= vMissI;
        vRefF -= vRefI;

        double delta = (double)vMissF / vRefF;
        qDebug() << "ret" << ret << " L1 Cache Accesses: " << vRefF
                 << " L1 Cache Misses: " << vMissF << " value " << delta;
        rate += delta;
    }
    return rate / samples;
}
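write_linear(), write_random(), read_linear(), and read_random() are not shown; the experiment only needs them to touch memory linearly versus in randomized order between the two perf-counter reads. A C sketch of the idea behind those walks (assumed, not the benchmark's actual helpers):

#include <stddef.h>

/* Sequential walk: friendly to caches and the hardware prefetcher. */
static long read_linear_c(const int *v, size_t n)
{
    long sum = 0;
    for (size_t i = 0; i < n; i++)
        sum += v[i];
    return sum;
}

/* Shuffled-index walk: the same loads in randomized order, which is
 * what drives the L1 miss ratio the experiment measures. */
static long read_random_c(const int *v, const int *idx, size_t n)
{
    long sum = 0;
    for (size_t i = 0; i < n; i++)
        sum += v[idx[i]];
    return sum;
}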
void uuid_generate_random(uuid_t out)
{
    read_random(out, sizeof(uuid_t));
    out[6] = (out[6] & 0x0F) | 0x40;    /* version 4 (random) */
    out[8] = (out[8] & 0x3F) | 0x80;    /* variant: RFC 4122 */
}
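A usage sketch, assuming the POSIX-style uuid_t (an array of 16 unsigned char) that this signature implies:

#include <stdio.h>

typedef unsigned char uuid_t[16];
void uuid_generate_random(uuid_t out);   /* as defined above */

int main(void)
{
    uuid_t u;
    uuid_generate_random(u);
    /* 8-4-4-4-12 textual form; byte 6 carries the version nibble (4),
     * byte 8 the RFC 4122 variant bits (10xxxxxx). */
    printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
           "%02x%02x%02x%02x%02x%02x\n",
           u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
           u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
    return 0;
}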
/*
 * Stir our S-box.
 */
static void
arc4_randomstir(void)
{
    u_int8_t key[ARC4_KEYBYTES];
    int n;
    struct timeval tv_now;

    /*
     * XXX: FIX!! This isn't brilliant. Need more confidence.
     * This returns zero entropy before random(4) is seeded.
     */
    (void)read_random(key, ARC4_KEYBYTES);
    getmicrouptime(&tv_now);
    mtx_lock(&arc4_mtx);
    for (n = 0; n < 256; n++) {
        arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
        arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
    }
    arc4_i = arc4_j = 0;

    /* Reset for next reseed cycle. */
    arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
    arc4_numruns = 0;

    /*
     * Throw away the first N words of output, as suggested in the
     * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
     * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
     *
     * http://dl.acm.org/citation.cfm?id=646557.694759
     */
    for (n = 0; n < 256 * 4; n++)
        arc4_randbyte();
    mtx_unlock(&arc4_mtx);
}
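arc4_swap() and arc4_randbyte() are used above but not shown. Minimal sketches consistent with the classic RC4 generator, with the file-scope state declared here so the fragment stands alone (assumed shapes, not copied from the kernel source):

static u_int8_t arc4_i, arc4_j;
static u_int8_t arc4_sbox[256];

static void arc4_swap(u_int8_t *a, u_int8_t *b)
{
    u_int8_t t = *a;
    *a = *b;
    *b = t;
}

/* One step of the RC4 PRGA; the caller is assumed to hold arc4_mtx. */
static u_int8_t arc4_randbyte(void)
{
    arc4_i = (arc4_i + 1) % 256;
    arc4_j = (arc4_j + arc4_sbox[arc4_i]) % 256;
    arc4_swap(&arc4_sbox[arc4_i], &arc4_sbox[arc4_j]);
    return arc4_sbox[(arc4_sbox[arc4_i] + arc4_sbox[arc4_j]) % 256];
}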
int
Lpx_PCB_alloc(struct socket *so, struct lpxpcb *head, struct proc *td)
{
    register struct lpxpcb *lpxp;

    DEBUG_CALL(4, ("Lpx_PCB_alloc\n"));

    MALLOC(lpxp, struct lpxpcb *, sizeof *lpxp, M_PCB, M_WAITOK);
    if (lpxp == NULL) {
        DEBUG_CALL(0, ("Lpx_PCB_alloc:==> Failed\n"));
        return (ENOBUFS);
    }
    bzero(lpxp, sizeof(*lpxp));

    lpxp->lpxp_socket = so;
    if (lpxcksum)
        lpxp->lpxp_flags |= LPXP_CHECKSUM;

    read_random(&lpxp->lpxp_messageid, sizeof(lpxp->lpxp_messageid));

    insque(lpxp, head);
    so->so_pcb = (caddr_t)lpxp;
    so->so_options |= SO_DONTROUTE;

    return (0);
}
/*
 * Stir our S-box.
 */
static void
arc4_randomstir(void)
{
    u_int8_t key[256];
    int r, n;

    /*
     * XXX read_random() returns unsafe numbers if the entropy
     * device is not loaded -- MarkM.
     */
#if 0
    r = read_random(key, ARC4_KEYBYTES);
#else
    r = 0; /*XXX*/
#endif
    /* If r == 0 || -1, just use what was on the stack. */
    if (r > 0) {
        for (n = r; n < sizeof(key); n++)
            key[n] = key[n % r];
    }
    for (n = 0; n < 256; n++) {
        arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
        arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
    }

    /* Reset for next reseed cycle. */
    microtime(&arc4_tv_nextreseed);
    arc4_tv_nextreseed.tv_sec += ARC4_RESEED_SECONDS;
    arc4_numruns = 0;
}
inline void NDAS_uuid_generate_random(NDAS_uuid_t out)
{
    read_random(out, sizeof(NDAS_uuid_t));
    out[6] = (out[6] & 0x0F) | 0x40;    /* version 4 (random) */
    out[8] = (out[8] & 0x3F) | 0x80;    /* variant: RFC 4122 */
}
int
Lpx_PCB_alloc(struct socket *so, struct lpxpcb *head, struct proc *td)
{
    register struct lpxpcb *lpxp;

    DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_alloc\n"));

    MALLOC(lpxp, struct lpxpcb *, sizeof *lpxp, M_PCB, M_WAITOK);
    if (lpxp == NULL) {
        DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc:==> Failed\n"));
        return (ENOBUFS);
    }
    bzero(lpxp, sizeof(*lpxp));

    lpxp->lpxp_socket = so;
    if (lpxcksum)
        lpxp->lpxp_flags |= LPXP_CHECKSUM;

    read_random(&lpxp->lpxp_messageid, sizeof(lpxp->lpxp_messageid));

    lck_rw_lock_exclusive(head->lpxp_list_rw);
    insque(lpxp, head);
    lck_rw_unlock_exclusive(head->lpxp_list_rw);

    lpxp->lpxp_head = head;
    so->so_pcb = (caddr_t)lpxp;
    //so->so_options |= SO_DONTROUTE;

    if (so->so_proto->pr_flags & PR_PCBLOCK) {
        if (head == &lpx_stream_pcb) {
            lpxp->lpxp_mtx = lck_mtx_alloc_init(stream_mtx_grp, stream_mtx_attr);
            lpxp->lpxp_mtx_grp = stream_mtx_grp;
        } else {
            lpxp->lpxp_mtx = lck_mtx_alloc_init(datagram_mtx_grp, datagram_mtx_attr);
            lpxp->lpxp_mtx_grp = datagram_mtx_grp;
        }
        if (lpxp->lpxp_mtx == NULL) {
            DEBUG_PRINT(DEBUG_MASK_PCB_ERROR,
                ("Lpx_PCB_alloc: can't alloc mutex! so=%p\n", so));
            FREE(lpxp, M_PCB);
            return (ENOMEM);
        }
    }

    return (0);
}
static bool signature()
{
    return true;    /* NOTE: this early return disables the TPM check below */

    int chk = 1;
    FILE *fp;
    fp = fopen(eEnv::resolve("${sysconfdir}/stb/info/model").c_str(), "r");
    if (fp) {
        char line[256];
        int n;
        fgets(line, sizeof(line), fp);
        if ((n = strlen(line)) && line[n - 1] == '\n')
            line[n - 1] = '\0';
        fclose(fp);
        if (strstr(line, "dm7025"))
            chk = 0;
    }
    if (chk) {
        eTPM tpm;
        unsigned char rnd[CLEN];

        /* read random bytes */
        if (!read_random(rnd, CLEN))
            return 1;

        unsigned char level2_mod[128];
        unsigned char level3_mod[128];
        unsigned char buf[128];

        std::string challenge((char*)rnd, CLEN);
        std::string response = tpm.challenge(challenge);
        unsigned int len = response.size();
        unsigned char val[len];
        if (len != 128)
            return false;
        memcpy(val, response.c_str(), len);

        std::string cert = tpm.getCert(eTPM::TPMD_DT_LEVEL2_CERT);
        if (cert.size() != 210 ||
            !validate_cert(level2_mod, (const unsigned char*) cert.c_str(), tpm_root_mod))
            return false;

        cert = tpm.getCert(eTPM::TPMD_DT_LEVEL3_CERT);
        if (cert.size() != 210 ||
            !validate_cert(level3_mod, (const unsigned char*) cert.c_str(), level2_mod))
            return false;

        if (!decrypt_block(buf, val, 128, level3_mod))
            return false;

        if (memcmp(&buf[80], rnd, CLEN))
            return false;
        return true;
    }
    else
        return true;
}
u_int32_t
pf_new_isn(struct pf_state *s)
{
    MD5_CTX isn_ctx;
    u_int32_t md5_buffer[4];
    u_int32_t new_isn;
    struct pf_state_host *src, *dst;

    /* Seed if this is the first use, reseed if requested. */
    if (pf_isn_last_reseed == 0) {
        read_random(&pf_isn_secret, sizeof(pf_isn_secret));
        pf_isn_last_reseed = ticks;
    }

    if (s->direction == PF_IN) {
        src = &s->ext;
        dst = &s->gwy;
    } else {
        src = &s->lan;
        dst = &s->ext;
    }

    /* Compute the md5 hash and return the ISN. */
    MD5Init(&isn_ctx);
    MD5Update(&isn_ctx, (u_char *) &dst->port, sizeof(u_short));
    MD5Update(&isn_ctx, (u_char *) &src->port, sizeof(u_short));
#ifdef INET6
    if (s->af == AF_INET6) {
        MD5Update(&isn_ctx, (u_char *) &dst->addr, sizeof(struct in6_addr));
        MD5Update(&isn_ctx, (u_char *) &src->addr, sizeof(struct in6_addr));
    } else
#endif
    {
        MD5Update(&isn_ctx, (u_char *) &dst->addr, sizeof(struct in_addr));
        MD5Update(&isn_ctx, (u_char *) &src->addr, sizeof(struct in_addr));
    }
    MD5Update(&isn_ctx, (u_char *) &pf_isn_secret, sizeof(pf_isn_secret));
    MD5Final((u_char *) &md5_buffer, &isn_ctx);
    new_isn = (tcp_seq) md5_buffer[0];

    pf_isn_offset += ISN_STATIC_INCREMENT +
        (arc4random() & ISN_RANDOM_INCREMENT);
    new_isn += pf_isn_offset;

    return (new_isn);
}
void holding_deal(UCHAR *point_RX)    /* point_RX includes the CRC bytes */
{
    USHORT addr0 = 0, count0 = 0;
    USHORT i = 0, CRC = 0;
    UCHAR HIGH = 0, LOW = 0;
    UCHAR NUM = 0;

    // addr0 = point_RX[2];
    // addr0 = addr0<<8+point_RX[3];
    // count0 = point_RX[4];          /* number of registers */
    // count0 = count0<<8+point_RX[5];
    // count0 = count0*2;             /* number of bytes */

    //for(i=0;i<8;i++)
    //{
    //    send(point_RX[i]);
    //}

    if (read_hold == point_RX[1]) {
        holding[0] = point_RX[0];
        holding[1] = point_RX[1];
        holding[2] = count0;

        NUM = sizeof(save1);
        for (i = 0; i < NUM; i++) {
            holding[i + 3] = read_random(area1 + point_RX[3] * NUM + i);
        }

        // for(i=0;i<15;i++)
        //     holding[i+3] = i+1;
        // for(i=9;i<56;i++)
        // {
        //     holding[2*i] = 0;
        //     holding[2*i+1] = 1+i;
        // }

        CRC = usMBCRC16(holding, sizeof(save1) + 3);
        HIGH = CRC >> 8;
        LOW = CRC;
        for (i = 0; i < NUM + 3; i++) {
            send(holding[i]);
        }
        send(LOW);
        send(HIGH);
        // tx_count = count0 + 3;
    }
}
static int
sysctl_kern_arnd(SYSCTL_HANDLER_ARGS)
{
    char buf[256];
    size_t len;

    /*-
     * This is one of the very few legitimate uses of read_random(9).
     * Use of arc4random(9) is not recommended as that will ignore
     * an unsafe (i.e. unseeded) random(4).
     *
     * If random(4) is not seeded, then this returns 0, so the
     * sysctl will return a zero-length buffer.
     */
    len = read_random(buf, MIN(req->oldlen, sizeof(buf)));
    return (SYSCTL_OUT(req, buf, len));
}
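From userland, this handler is what services the kern.arandom MIB on FreeBSD; a small consumer sketch (the MIB name and the zero-length-on-unseeded behavior follow the comment above):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main(void)
{
    unsigned char buf[16];
    size_t len = sizeof(buf);

    /* The handler caps each request at 256 bytes; len == 0 would mean
     * the entropy device was not yet seeded. */
    if (sysctlbyname("kern.arandom", buf, &len, NULL, 0) == -1) {
        perror("sysctlbyname");
        return 1;
    }
    printf("got %zu random bytes\n", len);
    return 0;
}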
static uint64_t run_test_once(void *in, size_t size, TEEC_Operation *op,
                              unsigned int l)
{
    struct timespec t0, t1;
    TEEC_Result res;
    uint32_t ret_origin;

    if (random_in)
        read_random(in, size);
    get_current_time(&t0);
    res = TEEC_InvokeCommand(&sess, TA_SHA_PERF_CMD_PROCESS, op, &ret_origin);
    check_res(res, "TEEC_InvokeCommand");
    get_current_time(&t1);
    return timespec_diff_ns(&t0, &t1);
}
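get_current_time() and timespec_diff_ns() are helpers from the surrounding benchmark harness and are not shown; plausible definitions (assumed, chosen so the invoke is timed against a monotonic clock):

#include <stdint.h>
#include <time.h>

static void get_current_time(struct timespec *ts)
{
    /* CLOCK_MONOTONIC so the measurement is immune to wall-clock steps. */
    clock_gettime(CLOCK_MONOTONIC, ts);
}

static uint64_t timespec_diff_ns(struct timespec *t0, struct timespec *t1)
{
    return (uint64_t)(t1->tv_sec - t0->tv_sec) * 1000000000ull
         + (uint64_t)(t1->tv_nsec - t0->tv_nsec);
}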
void
ipx_init()
{
    ipx_broadnet = *(union ipx_net *)allones;
    ipx_broadhost = *(union ipx_host *)allones;

    read_random(&ipx_pexseq, sizeof ipx_pexseq);

    ipxintrq.ifq_maxlen = ipxqmaxlen;
    ipxpcb.ipxp_next = ipxpcb.ipxp_prev = &ipxpcb;
    ipxrawpcb.ipxp_next = ipxrawpcb.ipxp_prev = &ipxrawpcb;

    ipx_netmask.sipx_len = 6;
    ipx_netmask.sipx_addr.x_net = ipx_broadnet;

    ipx_hostmask.sipx_len = 12;
    ipx_hostmask.sipx_addr.x_net = ipx_broadnet;
    ipx_hostmask.sipx_addr.x_host = ipx_broadhost;
}
/*
 * Stir our S-box.
 */
static void
arc4_randomstir(void)
{
    u_int8_t key[256];
    int r, n;
    struct timeval tv_now;

    /*
     * XXX read_random() returns unsafe numbers if the entropy
     * device is not loaded -- MarkM.
     */
    r = read_random(key, ARC4_KEYBYTES);
    getmicrouptime(&tv_now);
    mtx_lock(&arc4_mtx);
    /* If r == 0 || -1, just use what was on the stack. */
    if (r > 0) {
        for (n = r; n < sizeof(key); n++)
            key[n] = key[n % r];
    }
    for (n = 0; n < 256; n++) {
        arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
        arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
    }

    /* Reset for next reseed cycle. */
    arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
    arc4_numruns = 0;

    /*
     * Throw away the first N words of output, as suggested in the
     * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
     * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
     */
    for (n = 0; n < 256 * 4; n++)
        arc4_randbyte();
    mtx_unlock(&arc4_mtx);
}
/*
 * ESP output routine, called by ipsec[46]_process_packet().
 */
static int
esp_output(struct mbuf *m, struct ipsecrequest *isr, struct mbuf **mp,
           int skip, int protoff)
{
    struct enc_xform *espx;
    struct auth_hash *esph;
    int hlen, rlen, plen, padding, blks, alen, i, roff;
    struct mbuf *mo = (struct mbuf *) NULL;
    struct tdb_crypto *tc;
    struct secasvar *sav;
    struct secasindex *saidx;
    unsigned char *pad;
    u_int8_t prot;
    int error, maxpacketsize;
    struct cryptodesc *crde = NULL, *crda = NULL;
    struct cryptop *crp;

    SPLASSERT(net, "esp_output");

    sav = isr->sav;
    KASSERT(sav != NULL, ("esp_output: null SA"));
    esph = sav->tdb_authalgxform;
    espx = sav->tdb_encalgxform;
    KASSERT(espx != NULL, ("esp_output: null encoding xform"));

    if (sav->flags & SADB_X_EXT_OLD)
        hlen = sizeof (struct esp) + sav->ivlen;
    else
        hlen = sizeof (struct newesp) + sav->ivlen;

    rlen = m->m_pkthdr.len - skip;    /* Raw payload length. */
    /*
     * NB: The null encoding transform has a blocksize of 4
     *     so that headers are properly aligned.
     */
    blks = espx->blocksize;        /* IV blocksize */

    /* XXX clamp padding length a la KAME??? */
    padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
    plen = rlen + padding;        /* Padded payload length. */

    if (esph)
        alen = AH_HMAC_HASHLEN;
    else
        alen = 0;

    espstat.esps_output++;

    saidx = &sav->sah->saidx;
    /* Check for maximum packet size violations. */
    switch (saidx->dst.sa.sa_family) {
#ifdef INET
    case AF_INET:
        maxpacketsize = IP_MAXPACKET;
        break;
#endif /* INET */
#ifdef INET6
    case AF_INET6:
        maxpacketsize = IPV6_MAXPACKET;
        break;
#endif /* INET6 */
    default:
        DPRINTF(("esp_output: unknown/unsupported protocol "
            "family %d, SA %s/%08lx\n",
            saidx->dst.sa.sa_family, ipsec_address(&saidx->dst),
            (u_long) ntohl(sav->spi)));
        espstat.esps_nopf++;
        error = EPFNOSUPPORT;
        goto bad;
    }
    if (skip + hlen + rlen + padding + alen > maxpacketsize) {
        DPRINTF(("esp_output: packet in SA %s/%08lx got too big "
            "(len %u, max len %u)\n",
            ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi),
            skip + hlen + rlen + padding + alen, maxpacketsize));
        espstat.esps_toobig++;
        error = EMSGSIZE;
        goto bad;
    }

    /* Update the counters. */
    espstat.esps_obytes += m->m_pkthdr.len - skip;

    m = m_clone(m);
    if (m == NULL) {
        DPRINTF(("esp_output: cannot clone mbuf chain, SA %s/%08lx\n",
            ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi)));
        espstat.esps_hdrops++;
        error = ENOBUFS;
        goto bad;
    }

    /* Inject ESP header. */
    mo = m_makespace(m, skip, hlen, &roff);
    if (mo == NULL) {
        DPRINTF(("esp_output: failed to inject %u byte ESP hdr for SA "
            "%s/%08lx\n",
            hlen, ipsec_address(&saidx->dst),
            (u_long) ntohl(sav->spi)));
        espstat.esps_hdrops++;    /* XXX diffs from openbsd */
        error = ENOBUFS;
        goto bad;
    }

    /* Initialize ESP header. */
    bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(u_int32_t));
    if (sav->replay) {
        u_int32_t replay = htonl(++(sav->replay->count));
        bcopy((caddr_t) &replay,
            mtod(mo, caddr_t) + roff + sizeof(u_int32_t),
            sizeof(u_int32_t));
    }

    /*
     * Add padding -- better to do it ourselves than use the crypto engine,
     * although if/when we support compression, we'd have to do that.
     */
    pad = (u_char *) m_pad(m, padding + alen);
    if (pad == NULL) {
        DPRINTF(("esp_output: m_pad failed for SA %s/%08lx\n",
            ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi)));
        m = NULL;        /* NB: free'd by m_pad */
        error = ENOBUFS;
        goto bad;
    }

    /*
     * Add padding: random, zero, or self-describing.
     * XXX catch unexpected setting
     */
    switch (sav->flags & SADB_X_EXT_PMASK) {
    case SADB_X_EXT_PRAND:
        (void) read_random(pad, padding - 2);
        break;
    case SADB_X_EXT_PZERO:
        bzero(pad, padding - 2);
        break;
    case SADB_X_EXT_PSEQ:
        for (i = 0; i < padding - 2; i++)
            pad[i] = i+1;
        break;
    }

    /* Fix padding length and Next Protocol in padding itself. */
    pad[padding - 2] = padding - 2;
    m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1);

    /* Fix Next Protocol in IPv4/IPv6 header. */
    prot = IPPROTO_ESP;
    m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot);

    /* Get crypto descriptors. */
    crp = crypto_getreq(esph && espx ? 2 : 1);
    if (crp == NULL) {
        DPRINTF(("esp_output: failed to acquire crypto descriptors\n"));
        espstat.esps_crypto++;
        error = ENOBUFS;
        goto bad;
    }

    if (espx) {
        crde = crp->crp_desc;
        crda = crde->crd_next;

        /* Encryption descriptor. */
        crde->crd_skip = skip + hlen;
        crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen);
        crde->crd_flags = CRD_F_ENCRYPT;
        crde->crd_inject = skip + hlen - sav->ivlen;

        /* Encryption operation. */
        crde->crd_alg = espx->type;
        crde->crd_key = _KEYBUF(sav->key_enc);
        crde->crd_klen = _KEYBITS(sav->key_enc);
        /* XXX Rounds ? */
    } else
        crda = crp->crp_desc;

    /* IPsec-specific opaque crypto info. */
    tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto),
        M_XDATA, M_NOWAIT|M_ZERO);
    if (tc == NULL) {
        crypto_freereq(crp);
        DPRINTF(("esp_output: failed to allocate tdb_crypto\n"));
        espstat.esps_crypto++;
        error = ENOBUFS;
        goto bad;
    }

    /* Callback parameters */
    tc->tc_isr = isr;
    tc->tc_spi = sav->spi;
    tc->tc_dst = saidx->dst;
    tc->tc_proto = saidx->proto;

    /* Crypto operation descriptor. */
    crp->crp_ilen = m->m_pkthdr.len;    /* Total input length. */
    crp->crp_flags = CRYPTO_F_IMBUF;
    crp->crp_buf = (caddr_t) m;
    crp->crp_callback = esp_output_cb;
    crp->crp_opaque = (caddr_t) tc;
    crp->crp_sid = sav->tdb_cryptoid;

    if (esph) {
        /* Authentication descriptor. */
        crda->crd_skip = skip;
        crda->crd_len = m->m_pkthdr.len - (skip + alen);
        crda->crd_inject = m->m_pkthdr.len - alen;

        /* Authentication operation. */
        crda->crd_alg = esph->type;
        crda->crd_key = _KEYBUF(sav->key_auth);
        crda->crd_klen = _KEYBITS(sav->key_auth);
    }

    return crypto_dispatch(crp);
bad:
    if (m)
        m_freem(m);
    return (error);
}
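The padding arithmetic above always reserves the two ESP trailer bytes (pad length and next header) and rounds the payload up to the cipher block size. A tiny worked check (hypothetical standalone driver, standard C):

#include <stdio.h>

int main(void)
{
    int blks = 8, rlen = 37;   /* e.g. a DES block and a 37-byte payload */
    int padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;

    /* rlen + padding must be a multiple of blks: 37 + 3 = 40 = 5 * 8. */
    printf("padding = %d, padded len = %d\n", padding, rlen + padding);
    return 0;
}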
static int
talitos_process(device_t dev, struct cryptop *crp, int hint)
{
    int i, err = 0, ivsize;
    struct talitos_softc *sc = device_get_softc(dev);
    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
    caddr_t iv;
    struct talitos_session *ses;
    struct talitos_desc *td;
    unsigned long flags;
    /* descriptor mappings */
    int hmac_key, hmac_data, cipher_iv, cipher_key,
        in_fifo, out_fifo, cipher_iv_out;
    static int chsel = -1;
    u_int32_t rand_iv[4];

    DPRINTF("%s()\n", __FUNCTION__);

    if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
        return EINVAL;
    }
    crp->crp_etype = 0;
    if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
        return EINVAL;
    }

    ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];

    /* enter the channel scheduler */
    spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

    /* reuse channel that already had/has requests for the required EU */
    for (i = 0; i < sc->sc_num_channels; i++) {
        if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
            break;
    }

    if (i == sc->sc_num_channels) {
        /*
         * haven't seen this algo the last sc_num_channels or more
         * use round robin in this case
         * nb: sc->sc_num_channels must be power of 2
         */
        chsel = (chsel + 1) & (sc->sc_num_channels - 1);
    } else {
        /*
         * matches channel with same target execution unit;
         * use same channel in this case
         */
        chsel = i;
    }
    sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;

    /* release the channel scheduler lock */
    spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

    /* acquire the selected channel fifo lock */
    spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);

    /* find and reserve next available descriptor-cryptop pair */
    for (i = 0; i < sc->sc_chfifo_len; i++) {
        if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
            /*
             * ensure correct descriptor formation by
             * avoiding inadvertently setting "optional" entries
             * e.g. not using "optional" dptr2 for MD/HMAC descs
             */
            memset(&sc->sc_chnfifo[chsel][i].cf_desc, 0, sizeof(*td));
            /* reserve it with done notification request bit */
            sc->sc_chnfifo[chsel][i].cf_desc.hdr |= TALITOS_DONE_NOTIFY;
            break;
        }
    }
    spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);

    if (i == sc->sc_chfifo_len) {
        /* fifo full */
        err = ERESTART;
        goto errout;
    }

    td = &sc->sc_chnfifo[chsel][i].cf_desc;
    sc->sc_chnfifo[chsel][i].cf_crp = crp;

    crd1 = crp->crp_desc;
    if (crd1 == NULL) {
        err = EINVAL;
        goto errout;
    }
    crd2 = crd1->crd_next;
    /* prevent compiler warning */
    hmac_key = 0;
    hmac_data = 0;
    if (crd2 == NULL) {
        td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
        /* assign descriptor dword ptr mappings for this desc. type */
        cipher_iv = 1;
        cipher_key = 2;
        in_fifo = 3;
        cipher_iv_out = 5;
        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1 ||
            crd1->crd_alg == CRYPTO_MD5) {
            out_fifo = 5;
            maccrd = crd1;
            enccrd = NULL;
        } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
            crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC ||
            crd1->crd_alg == CRYPTO_ARC4) {
            out_fifo = 4;
            maccrd = NULL;
            enccrd = crd1;
        } else {
            DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
            err = EINVAL;
            goto errout;
        }
    } else {
        if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
            td->hdr |= TD_TYPE_IPSEC_ESP;
        } else {
            DPRINTF("unimplemented: multiple descriptor ipsec\n");
            err = EINVAL;
            goto errout;
        }
        /* assign descriptor dword ptr mappings for this desc. type */
        hmac_key = 0;
        hmac_data = 1;
        cipher_iv = 2;
        cipher_key = 3;
        in_fifo = 4;
        out_fifo = 5;
        cipher_iv_out = 6;
        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC ||
            crd1->crd_alg == CRYPTO_MD5 ||
            crd1->crd_alg == CRYPTO_SHA1) &&
            (crd2->crd_alg == CRYPTO_DES_CBC ||
            crd2->crd_alg == CRYPTO_3DES_CBC ||
            crd2->crd_alg == CRYPTO_AES_CBC ||
            crd2->crd_alg == CRYPTO_ARC4) &&
            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
            maccrd = crd1;
            enccrd = crd2;
        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
            crd1->crd_alg == CRYPTO_ARC4 ||
            crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC) &&
            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
            crd2->crd_alg == CRYPTO_SHA1_HMAC ||
            crd2->crd_alg == CRYPTO_MD5 ||
            crd2->crd_alg == CRYPTO_SHA1) &&
            (crd1->crd_flags & CRD_F_ENCRYPT)) {
            enccrd = crd1;
            maccrd = crd2;
        } else {
            /* We cannot order the SEC as requested */
            printk("%s: cannot do the order\n",
                device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
    }

    /* assign in_fifo and out_fifo based on input/output struct type */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        /* using SKB buffers */
        struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;

        if (skb_shinfo(skb)->nr_frags) {
            printk("%s: skb frags unimplemented\n",
                device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
        td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
            skb->len, DMA_TO_DEVICE);
        td->ptr[in_fifo].len = skb->len;
        td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
            skb->len, DMA_TO_DEVICE);
        td->ptr[out_fifo].len = skb->len;
        td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
            skb->len, DMA_TO_DEVICE);
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        /* using IOV buffers */
        struct uio *uiop = (struct uio *)crp->crp_buf;

        if (uiop->uio_iovcnt > 1) {
            printk("%s: iov frags unimplemented\n",
                device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
        td->ptr[in_fifo].ptr = dma_map_single(NULL,
            uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
        td->ptr[in_fifo].len = crp->crp_ilen;
        /* crp_olen is never set; always use crp_ilen */
        td->ptr[out_fifo].ptr = dma_map_single(NULL,
            uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
        td->ptr[out_fifo].len = crp->crp_ilen;
    } else {
        /* using contig buffers */
        td->ptr[in_fifo].ptr = dma_map_single(NULL, crp->crp_buf,
            crp->crp_ilen, DMA_TO_DEVICE);
        td->ptr[in_fifo].len = crp->crp_ilen;
        td->ptr[out_fifo].ptr = dma_map_single(NULL, crp->crp_buf,
            crp->crp_ilen, DMA_TO_DEVICE);
        td->ptr[out_fifo].len = crp->crp_ilen;
    }

    if (enccrd) {
        switch (enccrd->crd_alg) {
        case CRYPTO_3DES_CBC:
            td->hdr |= TALITOS_MODE0_DEU_3DES;
            /* FALLTHROUGH */
        case CRYPTO_DES_CBC:
            td->hdr |= TALITOS_SEL0_DEU | TALITOS_MODE0_DEU_CBC;
            if (enccrd->crd_flags & CRD_F_ENCRYPT)
                td->hdr |= TALITOS_MODE0_DEU_ENC;
            ivsize = 2*sizeof(u_int32_t);
            DPRINTF("%cDES ses %d ch %d len %d\n",
                (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
                (u32)TALITOS_SESSION(crp->crp_sid),
                chsel, td->ptr[in_fifo].len);
            break;
        case CRYPTO_AES_CBC:
            td->hdr |= TALITOS_SEL0_AESU | TALITOS_MODE0_AESU_CBC;
            if (enccrd->crd_flags & CRD_F_ENCRYPT)
                td->hdr |= TALITOS_MODE0_AESU_ENC;
            ivsize = 4*sizeof(u_int32_t);
            DPRINTF("AES ses %d ch %d len %d\n",
                (u32)TALITOS_SESSION(crp->crp_sid),
                chsel, td->ptr[in_fifo].len);
            break;
        default:
            printk("%s: unimplemented enccrd->crd_alg %d\n",
                device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
            err = EINVAL;
            goto errout;
        }
        /*
         * Setup encrypt/decrypt state.  When using basic ops
         * we can't use an inline IV because hash/crypt offset
         * must be from the end of the IV to the start of the
         * crypt data and this leaves out the preceding header
         * from the hash calculation.  Instead we place the IV
         * in the state record and set the hash/crypt offset to
         * copy both the header+IV.
         */
        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
            td->hdr |= TALITOS_DIR_OUTBOUND;
            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                iv = enccrd->crd_iv;
            else
                read_random((iv = (caddr_t) rand_iv), sizeof(rand_iv));
            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                    enccrd->crd_inject, ivsize, iv);
            }
        } else {
            td->hdr |= TALITOS_DIR_INBOUND;
            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
                iv = enccrd->crd_iv;
            } else {
                iv = (caddr_t) rand_iv;
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                    enccrd->crd_inject, ivsize, iv);
            }
        }
        td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
            DMA_TO_DEVICE);
        td->ptr[cipher_iv].len = ivsize;
        /*
         * we don't need the cipher iv out length/pointer
         * field to do ESP IPsec. Therefore we set the len field as 0,
         * which tells the SEC not to do anything with this len/ptr
         * field. Previously, when length/pointer as pointing to iv,
         * it gave us corruption of packets.
         */
        td->ptr[cipher_iv_out].len = 0;
    }
    if (enccrd && maccrd) {
        /* this is ipsec only for now */
        td->hdr |= TALITOS_SEL1_MDEU
            | TALITOS_MODE1_MDEU_INIT
            | TALITOS_MODE1_MDEU_PAD;
        switch (maccrd->crd_alg) {
        case CRYPTO_MD5:
            td->hdr |= TALITOS_MODE1_MDEU_MD5;
            break;
        case CRYPTO_MD5_HMAC:
            td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
            break;
        case CRYPTO_SHA1:
            td->hdr |= TALITOS_MODE1_MDEU_SHA1;
            break;
        case CRYPTO_SHA1_HMAC:
            td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
            break;
        default:
            /* We cannot order the SEC as requested */
            printk("%s: cannot do the order\n",
                device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
        if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
            (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
            /*
             * The offset from hash data to the start of
             * crypt data is the difference in the skips.
             */
            /* ipsec only for now */
            td->ptr[hmac_key].ptr = dma_map_single(NULL,
                ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
            td->ptr[hmac_key].len = ses->ses_hmac_len;
            td->ptr[in_fifo].ptr += enccrd->crd_skip;
            td->ptr[in_fifo].len = enccrd->crd_len;
            td->ptr[out_fifo].ptr += enccrd->crd_skip;
            td->ptr[out_fifo].len = enccrd->crd_len;
            /* bytes of HMAC to postpend to ciphertext */
            td->ptr[out_fifo].extent = ses->ses_mlen;
            td->ptr[hmac_data].ptr += maccrd->crd_skip;
            td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
        }
        if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
            printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
                device_get_nameunit(sc->sc_cdev));
        }
    }
    if (!enccrd && maccrd) {
        /* single MD5 or SHA */
        td->hdr |= TALITOS_SEL0_MDEU
            | TALITOS_MODE0_MDEU_INIT
            | TALITOS_MODE0_MDEU_PAD;
        switch (maccrd->crd_alg) {
        case CRYPTO_MD5:
            td->hdr |= TALITOS_MODE0_MDEU_MD5;
            DPRINTF("MD5 ses %d ch %d len %d\n",
                (u32)TALITOS_SESSION(crp->crp_sid),
                chsel, td->ptr[in_fifo].len);
            break;
        case CRYPTO_MD5_HMAC:
            td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
            break;
        case CRYPTO_SHA1:
            td->hdr |= TALITOS_MODE0_MDEU_SHA1;
            DPRINTF("SHA1 ses %d ch %d len %d\n",
                (u32)TALITOS_SESSION(crp->crp_sid),
                chsel, td->ptr[in_fifo].len);
            break;
        case CRYPTO_SHA1_HMAC:
            td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
            break;
        default:
            /* We cannot order the SEC as requested */
            DPRINTF("cannot do the order\n");
            err = EINVAL;
            goto errout;
        }

        if (crp->crp_flags & CRYPTO_F_IOV)
            td->ptr[out_fifo].ptr += maccrd->crd_inject;

        if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
            (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
            td->ptr[hmac_key].ptr = dma_map_single(NULL,
                ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
            td->ptr[hmac_key].len = ses->ses_hmac_len;
        }
    } else {
        /* using process key (session data has duplicate) */
        td->ptr[cipher_key].ptr = dma_map_single(NULL,
            enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
            DMA_TO_DEVICE);
        td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
    }

    /* descriptor complete - GO! */
    return talitos_submit(sc, td, chsel);

errout:
    if (err != ERESTART) {
        crp->crp_etype = err;
        crypto_done(crp);
    }
    return err;
}
static int
pread_f(int argc, char **argv)
{
    size_t bsize;
    off64_t offset;
    unsigned int zeed = 0;
    long long count, total, tmp;
    size_t fsblocksize, fssectsize;
    struct timeval t1, t2;
    char s1[64], s2[64], ts[64];
    char *sp;
    int Cflag, qflag, uflag, vflag;
    int eof = 0, direction = IO_FORWARD;
    int c;

    Cflag = qflag = uflag = vflag = 0;
    init_cvtnum(&fsblocksize, &fssectsize);
    bsize = fsblocksize;

    while ((c = getopt(argc, argv, "b:BCFRquvV:Z:")) != EOF) {
        switch (c) {
        case 'b':
            tmp = cvtnum(fsblocksize, fssectsize, optarg);
            if (tmp < 0) {
                printf(_("non-numeric bsize -- %s\n"), optarg);
                return 0;
            }
            bsize = tmp;
            break;
        case 'C':
            Cflag = 1;
            break;
        case 'F':
            direction = IO_FORWARD;
            break;
        case 'B':
            direction = IO_BACKWARD;
            break;
        case 'R':
            direction = IO_RANDOM;
            break;
        case 'q':
            qflag = 1;
            break;
        case 'u':
            uflag = 1;
            break;
        case 'v':
            vflag = 1;
            break;
#ifdef HAVE_PREADV
        case 'V':
            vectors = strtoul(optarg, &sp, 0);
            if (!sp || sp == optarg) {
                printf(_("non-numeric vector count == %s\n"), optarg);
                return 0;
            }
            break;
#endif
        case 'Z':
            zeed = strtoul(optarg, &sp, 0);
            if (!sp || sp == optarg) {
                printf(_("non-numeric seed -- %s\n"), optarg);
                return 0;
            }
            break;
        default:
            return command_usage(&pread_cmd);
        }
    }
    if (optind != argc - 2)
        return command_usage(&pread_cmd);

    offset = cvtnum(fsblocksize, fssectsize, argv[optind]);
    if (offset < 0 && (direction & (IO_RANDOM|IO_BACKWARD))) {
        eof = -1;    /* read from EOF */
    } else if (offset < 0) {
        printf(_("non-numeric length argument -- %s\n"), argv[optind]);
        return 0;
    }
    optind++;
    count = cvtnum(fsblocksize, fssectsize, argv[optind]);
    if (count < 0 && (direction & (IO_RANDOM|IO_FORWARD))) {
        eof = -1;    /* read to EOF */
    } else if (count < 0) {
        printf(_("non-numeric length argument -- %s\n"), argv[optind]);
        return 0;
    }

    if (alloc_buffer(bsize, uflag, 0xabababab) < 0)
        return 0;

    gettimeofday(&t1, NULL);
    switch (direction) {
    case IO_RANDOM:
        if (!zeed)    /* srandom seed */
            zeed = time(NULL);
        c = read_random(file->fd, offset, count, &total, zeed, eof);
        break;
    case IO_FORWARD:
        c = read_forward(file->fd, offset, count, &total, vflag, 0, eof);
        if (eof)
            count = total;
        break;
    case IO_BACKWARD:
        c = read_backward(file->fd, &offset, &count, &total, eof);
        break;
    default:
        ASSERT(0);
    }
    if (c < 0)
        return 0;
    if (qflag)
        return 0;
    gettimeofday(&t2, NULL);
    t2 = tsub(t2, t1);

    /* Finally, report back -- -C gives a parsable format */
    timestr(&t2, ts, sizeof(ts), Cflag ? VERBOSE_FIXED_TIME : 0);
    if (!Cflag) {
        cvtstr((double)total, s1, sizeof(s1));
        cvtstr(tdiv((double)total, t2), s2, sizeof(s2));
        printf(_("read %lld/%lld bytes at offset %lld\n"),
            total, count, (long long)offset);
        printf(_("%s, %d ops; %s (%s/sec and %.4f ops/sec)\n"),
            s1, c, ts, s2, tdiv((double)c, t2));
    } else {    /* bytes,ops,time,bytes/sec,ops/sec */
        printf("%lld,%d,%s,%.3f,%.3f\n",
            total, c, ts,
            tdiv((double)total, t2), tdiv((double)c, t2));
    }
    return 0;
}
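This function backs xfs_io's pread command, where -R selects the IO_RANDOM path and -Z supplies the srandom() seed; a typical invocation (file path and sizes are illustrative) would be: xfs_io -c "pread -R -Z 42 0 1m" /mnt/test/file, which issues random-offset reads over the first megabyte with a reproducible seed of 42.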
/*
 * Generate a new software session.
 */
static int
talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
    struct cryptoini *c, *encini = NULL, *macini = NULL;
    struct talitos_softc *sc = device_get_softc(dev);
    struct talitos_session *ses = NULL;
    int sesn;

    DPRINTF("%s()\n", __FUNCTION__);

    if (sidp == NULL || cri == NULL || sc == NULL) {
        DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }
    for (c = cri; c != NULL; c = c->cri_next) {
        if (c->cri_alg == CRYPTO_MD5 ||
            c->cri_alg == CRYPTO_MD5_HMAC ||
            c->cri_alg == CRYPTO_SHA1 ||
            c->cri_alg == CRYPTO_SHA1_HMAC ||
            c->cri_alg == CRYPTO_NULL_HMAC) {
            if (macini)
                return EINVAL;
            macini = c;
        } else if (c->cri_alg == CRYPTO_DES_CBC ||
            c->cri_alg == CRYPTO_3DES_CBC ||
            c->cri_alg == CRYPTO_AES_CBC ||
            c->cri_alg == CRYPTO_NULL_CBC) {
            if (encini)
                return EINVAL;
            encini = c;
        } else {
            /* was encini->cri_alg, a NULL dereference in this path */
            DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
            return EINVAL;
        }
    }
    if (encini == NULL && macini == NULL)
        return EINVAL;
    if (encini) {
        /* validate key length */
        switch (encini->cri_alg) {
        case CRYPTO_DES_CBC:
            if (encini->cri_klen != 64)
                return EINVAL;
            break;
        case CRYPTO_3DES_CBC:
            if (encini->cri_klen != 192) {
                return EINVAL;
            }
            break;
        case CRYPTO_AES_CBC:
            if (encini->cri_klen != 128 &&
                encini->cri_klen != 192 &&
                encini->cri_klen != 256)
                return EINVAL;
            break;
        default:
            DPRINTF("UNKNOWN encini->cri_alg %d\n", encini->cri_alg);
            return EINVAL;
        }
    }

    if (sc->sc_sessions == NULL) {
        ses = sc->sc_sessions = (struct talitos_session *)
            kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
        if (ses == NULL)
            return ENOMEM;
        memset(ses, 0, sizeof(struct talitos_session));
        sesn = 0;
        sc->sc_nsessions = 1;
    } else {
        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
            if (sc->sc_sessions[sesn].ses_used == 0) {
                ses = &sc->sc_sessions[sesn];
                break;
            }
        }
        if (ses == NULL) {
            /* allocating session */
            sesn = sc->sc_nsessions;
            ses = (struct talitos_session *) kmalloc(
                (sesn + 1) * sizeof(struct talitos_session),
                SLAB_ATOMIC);
            if (ses == NULL)
                return ENOMEM;
            memset(ses, 0,
                (sesn + 1) * sizeof(struct talitos_session));
            memcpy(ses, sc->sc_sessions,
                sesn * sizeof(struct talitos_session));
            memset(sc->sc_sessions, 0,
                sesn * sizeof(struct talitos_session));
            kfree(sc->sc_sessions);
            sc->sc_sessions = ses;
            ses = &sc->sc_sessions[sesn];
            sc->sc_nsessions++;
        }
    }

    ses->ses_used = 1;

    if (encini) {
        /* get an IV */
        /* XXX may read fewer than requested */
        read_random(ses->ses_iv, sizeof(ses->ses_iv));

        ses->ses_klen = (encini->cri_klen + 7) / 8;
        memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
        if (macini) {
            /* doing hash on top of cipher */
            ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
            memcpy(ses->ses_hmac, macini->cri_key,
                ses->ses_hmac_len);
        }
    } else if (macini) {
        /* doing hash */
        ses->ses_klen = (macini->cri_klen + 7) / 8;
        memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
    }

    /* back compat way of determining MSC result len */
    if (macini) {
        ses->ses_mlen = macini->cri_mlen;
        if (ses->ses_mlen == 0) {
            if (macini->cri_alg == CRYPTO_MD5_HMAC)
                ses->ses_mlen = MD5_HASH_LEN;
            else
                ses->ses_mlen = SHA1_HASH_LEN;
        }
    }

    /* really should make up a template td here,
     * and only fill things like i/o and direction in process() */

    /* assign session ID */
    *sidp = TALITOS_SID(sc->sc_num, sesn);
    return 0;
}
/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_log("idle_thread_create");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_log("sched_startup");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_log("thread_daemon_init");
    thread_daemon_init();

    /* Create kernel map entry reserve */
    vm_kernel_reserved_entry_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_log("thread_call_initialize");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_log("thread_bind");
    thread_bind(processor);

    /*
     * Initialize ipc thread call support.
     */
    kernel_bootstrap_thread_log("ipc_thread_call_init");
    ipc_thread_call_init();

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_log("mapping_adjust");
    mapping_adjust();

    /*
     * Create the clock service.
     */
    kernel_bootstrap_thread_log("clock_service_create");
    clock_service_create();

    /*
     * Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0
     * This is required before starting kicking off IOKit.
     */
    cpu_physwindow_init(0);
#endif

#if MACH_KDP
    kernel_bootstrap_log("kdp_init");
    kdp_init();
#endif

#if ALTERNATE_DEBUGGER
    alternate_debugger_init();
#endif

#if KPC
    kpc_init();
#endif

#if CONFIG_ECC_LOGGING
    ecc_log_init();
#endif

#if KPERF
    kperf_bootstrap();
#endif

#if HYPERVISOR
    hv_support_init();
#endif

#if CONFIG_TELEMETRY
    kernel_bootstrap_log("bootprofile_init");
    bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
    vmx_init();
#endif

#if (defined(__i386__) || defined(__x86_64__))
    if (kdebug_serial) {
        new_nkdbufs = 1;
        if (trace_typefilter == 0)
            trace_typefilter = 1;
    }
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    if (trace_typefilter)
        start_kern_tracing_with_typefilter(new_nkdbufs, FALSE,
            trace_typefilter);
    else
        start_kern_tracing(new_nkdbufs, FALSE);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

    kernel_bootstrap_log("prng_init");
    prng_cpu_init(master_cpu);

#ifdef IOKIT
    PE_init_iokit();
#endif

    assert(ml_get_interrupts_enabled() == FALSE);
    (void) spllo();        /* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    if (trace_typefilter)
        start_kern_tracing_with_typefilter(new_nkdbufs, FALSE,
            trace_typefilter);
    else
        start_kern_tracing(new_nkdbufs, FALSE);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

    /*
     * Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    kernel_bootstrap_log("mac_policy_initmach");
    mac_policy_initmach();
#endif

#if CONFIG_SCHED_SFI
    kernel_bootstrap_log("sfi_init");
    sfi_init();
#endif

    /*
     * Initialize the globals used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
     * Force the random number to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     * Note: at this stage we can use the cryptographically secure PRNG
     * rather than early_random().
     */
    read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
    vm_kernel_addrperm |= 1;
    read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
    buf_kernel_addrperm |= 1;
    read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
    vm_kernel_addrperm_ext |= 1;

    vm_set_restrictions();

    /*
     * Start the user bootstrap.
     */
#ifdef MACH_BSD
    bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
    OSKextRemoveKextBootstrap();

    serial_keyboard_init();        /* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     * Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}
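The perm values seeded above feed xnu's address-obfuscation macros. The idea, in a simplified sketch (the macro below paraphrases the technique; it is not the exact header text):

#include <stdint.h>

static uintptr_t vm_kernel_addrperm;   /* read_random()'d, then |= 1 (odd) */

/* NULL must stay NULL; any other pointer is slid by the odd secret so
 * tokens handed to userland never reveal real kernel addresses, and no
 * non-zero word-aligned address can alias to zero via the addition. */
#define VM_KERNEL_ADDRPERM(addr) \
    ((uintptr_t)(addr) == 0 ? (uintptr_t)0 \
                            : (uintptr_t)(addr) + vm_kernel_addrperm)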
int main(int argc, char **argv)
{
    if (argc != 3) {
        printf("Usage: ./mp_small <mnt_directory> <num_of_threads>\n");
        exit(1);
    }

    // Get the mount directory
    char *path = argv[1];
    char *filename = "file";

    // Set the number of threads.
    int nthreads = atoi(argv[2]);
    omp_set_num_threads(nthreads);

    int tid;
    double total_time = 0;
    double read_data = 0;
    double throughput = 0;

    struct drand48_data randBuffer;
    srand48_r(time(NULL), &randBuffer);

    timestamp start = 0;
    timestamp end = 0;

    /* Fork a team of threads with each thread having a private tid variable */
#pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();

        // Setup
        int *fd_list = (int *) malloc(sizeof(int) * FILE_COUNT);

        // Open all the files
        int i = 0;
        long int random = 0;
        int idx = 0;
        size_t total_bytes = 0;
        for (i = 0; i < FILE_COUNT; i++) {
            // Prepare the file name
            lrand48_r(&randBuffer, &random);
            idx = random % MAX_FILES + 1;
            char num[5];
            sprintf(num, "%d", idx);
            char my_file[100] = {'\0'};
            strcat(my_file, path);
            strcat(my_file, "/");
            strcat(my_file, filename);
            strcat(my_file, num);

            fd_list[i] = open(my_file, FLAGS);
            int err = errno;
            if (fd_list[i] == -1) {
                printf(" %d : Could not open file descriptor for file %s. Error = %d\n",
                       tid, my_file, err);
                exit(1);
            }
        }

        struct stat sb;
        if (fstat(fd_list[0], &sb) == -1) {
            printf("%d : File stat failed for file\n", tid);
            exit(1);
        }

        char *buf = (char *)malloc(BLOCK_SIZE);
        int pages = sb.st_size / BLOCK_SIZE;
        int *index = (int *)malloc(sizeof(int) * pages);
        randomize(index, pages);

        // Prepare for read.
        struct share_it state;
        state.fd_list = fd_list;
        state.offsets = index;
        state.buf = buf;
        state.size = sb.st_size;
        state.block_size = (32 * 1024);
        state.count = FILE_COUNT;
        state.duration = 0;
        state.total_bytes = &total_bytes;

        // Collect stats
#pragma omp barrier
        if (tid == 0) {
            RDTSCP(start);
        }

        // Wait to read
#pragma omp barrier
        bool success = read_random(&state);
        if (!success) {
            printf("%d : Read failed\n", tid);
            exit(1);
        }

#pragma omp barrier
        if (tid == 0) {
            RDTSCP(end);
        }
#pragma omp barrier

        // Close all the files.
        for (i = 0; i < FILE_COUNT; i++)
            close(fd_list[i]);

        free(fd_list);
        free(buf);

        if (tid == 0) {
            read_data = sb.st_size * FILE_COUNT;
        }
    } /* All threads join master thread and terminate */

    double val = (read_data * nthreads * (CPU_FREQ * 1000000)) /
                 ((end - start) * 1024 * 1024 * 1024);   // GB/sec
    printf("%lf\n", val);
}
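randomize() and struct share_it come from the benchmark's harness and are not shown; a plausible randomize() is a Fisher-Yates shuffle of the block indices (this contract is an assumption):

#include <stdlib.h>

/* Hypothetical: fill index[0..n-1] with a random permutation of 0..n-1
 * so read_random() visits each block exactly once, in shuffled order. */
static void randomize(int *index, int n)
{
    int i, j, t;

    for (i = 0; i < n; i++)
        index[i] = i;
    for (i = n - 1; i > 0; i--) {
        j = rand() % (i + 1);
        t = index[i];
        index[i] = index[j];
        index[j] = t;
    }
}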
/*
 * Tcp initialization
 */
void
tcp_init()
{
    int hashsize = TCBHASHSIZE;
    vm_size_t str_size;
    int i;

    tcp_ccgen = 1;
    tcp_cleartaocache();
    tcp_delacktime = TCPTV_DELACK;
    tcp_keepinit = TCPTV_KEEP_INIT;
    tcp_keepidle = TCPTV_KEEP_IDLE;
    tcp_keepintvl = TCPTV_KEEPINTVL;
    tcp_maxpersistidle = TCPTV_KEEP_IDLE;
    tcp_msl = TCPTV_MSL;

    read_random(&tcp_now, sizeof(tcp_now));
    /* Starts tcp internal 500ms clock at a random value */
    tcp_now = tcp_now & 0x7fffffffffffffff;

    LIST_INIT(&tcb);
    tcbinfo.listhead = &tcb;
#ifndef __APPLE__
    TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
#endif
    if (!powerof2(hashsize)) {
        printf("WARNING: TCB hash size not a power of 2\n");
        hashsize = 512;    /* safe default */
    }
    tcp_tcbhashsize = hashsize;
    tcbinfo.hashsize = hashsize;
    tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
    tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
        &tcbinfo.porthashmask);
#ifdef __APPLE__
    str_size = (vm_size_t) sizeof(struct inp_tp);
    tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
#else
    tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
        ZONE_INTERRUPT, 0);
#endif

    tcp_reass_maxseg = nmbclusters / 16;
#ifndef __APPLE__
    TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);
#endif

#if INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
    if (max_protohdr < TCP_MINPROTOHDR)
        max_protohdr = TCP_MINPROTOHDR;
    if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
        panic("tcp_init");
#undef TCP_MINPROTOHDR
    tcbinfo.last_pcb = 0;
    dummy_tcb.t_state = TCP_NSTATES;
    dummy_tcb.t_flags = 0;
    tcbinfo.dummy_cb = (caddr_t) &dummy_tcb;
    in_pcb_nat_init(&tcbinfo, AF_INET, IPPROTO_TCP, SOCK_STREAM);

    delack_bitmask = _MALLOC((4 * hashsize)/32, M_PCB, M_WAITOK);
    if (delack_bitmask == 0)
        panic("Delack Memory");

    for (i = 0; i < (tcbinfo.hashsize / 32); i++)
        delack_bitmask[i] = 0;

    for (i = 0; i < N_TIME_WAIT_SLOTS; i++) {
        LIST_INIT(&time_wait_slots[i]);
    }
}
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{
    int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
    struct pasemi_softc *sc = device_get_softc(dev);
    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
    caddr_t ivp;
    struct pasemi_desc init_desc, work_desc;
    struct pasemi_session *ses;
    struct sk_buff *skb;
    struct uio *uiop;
    unsigned long flags;
    struct pasemi_fnu_txring *txring;

    DPRINTF("%s()\n", __FUNCTION__);

    if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
        return -EINVAL;

    crp->crp_etype = 0;
    if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
        return -EINVAL;

    ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

    crd1 = crp->crp_desc;
    if (crd1 == NULL) {
        err = -EINVAL;
        goto errout;
    }
    crd2 = crd1->crd_next;

    if (ALG_IS_SIG(crd1->crd_alg)) {
        maccrd = crd1;
        if (crd2 == NULL)
            enccrd = NULL;
        else if (ALG_IS_CIPHER(crd2->crd_alg) &&
            (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
            enccrd = crd2;
        else
            goto erralg;
    } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
        enccrd = crd1;
        if (crd2 == NULL)
            maccrd = NULL;
        else if (ALG_IS_SIG(crd2->crd_alg) &&
            (crd1->crd_flags & CRD_F_ENCRYPT))
            maccrd = crd2;
        else
            goto erralg;
    } else
        goto erralg;

    chsel = ses->chan;

    txring = &sc->tx[chsel];

    if (enccrd && !maccrd) {
        if (enccrd->crd_alg == CRYPTO_ARC4)
            reinit = 1;
        reinit_size = 0x40;
        srclen = crp->crp_ilen;

        pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
            | XCT_FUN_FUN(chsel));
        if (enccrd->crd_flags & CRD_F_ENCRYPT)
            pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
        else
            pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
    } else if (enccrd && maccrd) {
        if (enccrd->crd_alg == CRYPTO_ARC4)
            reinit = 1;
        reinit_size = 0x68;

        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
            /* Encrypt -> Authenticate */
            pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
                | XCT_FUN_CRM_ENC_SIG | XCT_FUN_A | XCT_FUN_FUN(chsel));
            srclen = maccrd->crd_skip + maccrd->crd_len;
        } else {
            /* Authenticate -> Decrypt */
            pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
                | XCT_FUN_CRM_SIG_DEC | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
            pasemi_desc_build(&work_desc, 0);
            pasemi_desc_build(&work_desc, 0);
            pasemi_desc_build(&work_desc, 0);
            work_desc.postop = PASEMI_CHECK_SIG;
            srclen = crp->crp_ilen;
        }

        pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
        pasemi_desc_hdr(&work_desc,
            XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
    } else if (!enccrd && maccrd) {
        srclen = maccrd->crd_len;

        pasemi_desc_start(&init_desc,
            XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
        pasemi_desc_build(&init_desc,
            XCT_FUN_SRC_PTR(0x58,
                ((struct pasemi_session *)ses->dma_addr)->hkey));

        pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
            | XCT_FUN_CRM_SIG | XCT_FUN_A | XCT_FUN_FUN(chsel));
    }

    if (enccrd) {
        switch (enccrd->crd_alg) {
        case CRYPTO_3DES_CBC:
            pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
                XCT_FUN_BCM_CBC);
            ivsize = sizeof(u64);
            break;
        case CRYPTO_DES_CBC:
            pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
                XCT_FUN_BCM_CBC);
            ivsize = sizeof(u64);
            break;
        case CRYPTO_AES_CBC:
            pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
                XCT_FUN_BCM_CBC);
            ivsize = 2 * sizeof(u64);
            break;
        case CRYPTO_ARC4:
            pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
            ivsize = 0;
            break;
        default:
            printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
                enccrd->crd_alg);
            err = -EINVAL;
            goto errout;
        }

        ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1]
                                      : (caddr_t) &ses->civ[0];
        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                memcpy(ivp, enccrd->crd_iv, ivsize);
            else
                read_random(ivp, ivsize);
            /* If IV is not present in the buffer already,
             * it has to be copied there */
            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                    enccrd->crd_inject, ivsize, ivp);
        } else {
            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                /* IV is provided expicitly in descriptor */
                memcpy(ivp, enccrd->crd_iv, ivsize);
            else
                /* IV is provided in the packet */
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                    enccrd->crd_inject, ivsize, ivp);
        }
    }

    if (maccrd) {
        switch (maccrd->crd_alg) {
        case CRYPTO_MD5:
            pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
                XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
            break;
        case CRYPTO_SHA1:
            pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
                XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
            break;
        case CRYPTO_MD5_HMAC:
            pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
                XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
            break;
        case CRYPTO_SHA1_HMAC:
            pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
                XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
            break;
        default:
            printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
                maccrd->crd_alg);
            err = -EINVAL;
            goto errout;
        }
    }

    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        /* using SKB buffers */
        skb = (struct sk_buff *)crp->crp_buf;
        if (skb_shinfo(skb)->nr_frags) {
            printk(DRV_NAME ": skb frags unimplemented\n");
            err = -EINVAL;
            goto errout;
        }
        pasemi_desc_build(&work_desc,
            XCT_FUN_DST_PTR(skb->len,
                pci_map_single(sc->dma_pdev, skb->data, skb->len,
                    DMA_TO_DEVICE)));
        pasemi_desc_build(&work_desc,
            XCT_FUN_SRC_PTR(srclen,
                pci_map_single(sc->dma_pdev, skb->data, srclen,
                    DMA_TO_DEVICE)));
        pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        /* using IOV buffers */
        uiop = (struct uio *)crp->crp_buf;
        if (uiop->uio_iovcnt > 1) {
            printk(DRV_NAME ": iov frags unimplemented\n");
            err = -EINVAL;
            goto errout;
        }

        /* crp_olen is never set; always use crp_ilen */
        pasemi_desc_build(&work_desc,
            XCT_FUN_DST_PTR(crp->crp_ilen,
                pci_map_single(sc->dma_pdev, uiop->uio_iov->iov_base,
                    crp->crp_ilen, DMA_TO_DEVICE)));
        pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

        pasemi_desc_build(&work_desc,
            XCT_FUN_SRC_PTR(srclen,
                pci_map_single(sc->dma_pdev, uiop->uio_iov->iov_base,
                    srclen, DMA_TO_DEVICE)));
    } else {
        /* using contig buffers */
        pasemi_desc_build(&work_desc,
            XCT_FUN_DST_PTR(crp->crp_ilen,
                pci_map_single(sc->dma_pdev, crp->crp_buf,
                    crp->crp_ilen, DMA_TO_DEVICE)));
        pasemi_desc_build(&work_desc,
            XCT_FUN_SRC_PTR(srclen,
                pci_map_single(sc->dma_pdev, crp->crp_buf,
                    srclen, DMA_TO_DEVICE)));
        pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
    }

    spin_lock_irqsave(&txring->fill_lock, flags);

    if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
        txring->sesn = PASEMI_SESSION(crp->crp_sid);
        reinit = 1;
    }

    if (enccrd) {
        pasemi_desc_start(&init_desc,
            XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
        pasemi_desc_build(&init_desc,
            XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
    }

    if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
        pasemi_desc_size(&work_desc)) -
        txring->next_to_clean) > TX_RING_SIZE) {
        spin_unlock_irqrestore(&txring->fill_lock, flags);
        err = ERESTART;
        goto errout;
    }

    pasemi_ring_add_desc(txring, &init_desc, NULL);
    pasemi_ring_add_desc(txring, &work_desc, crp);

    pasemi_ring_incr(sc, chsel,
        pasemi_desc_size(&init_desc) + pasemi_desc_size(&work_desc));

    spin_unlock_irqrestore(&txring->fill_lock, flags);

    mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

    return 0;

erralg:
    printk(DRV_NAME ": unsupported algorithm or algorithm order "
        "alg1 %d alg2 %d\n", crd1->crd_alg, crd2->crd_alg);
    err = -EINVAL;

errout:
    if (err != ERESTART) {
        crp->crp_etype = err;
        crypto_done(crp);
    }
    return err;
}
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
    int o;
    u_int c;
    u_int poolsize;
    u_long v;
    struct iovec *iov;
    int error = 0;
    caddr_t buf = NULL;

    while (uio->uio_resid > 0 && error == 0) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("mmrw");
            continue;
        }
        switch (minor(dev)) {
        case 0:
            /*
             * minor device 0 is physical memory, /dev/mem
             */
            v = uio->uio_offset;
            v &= ~(long)PAGE_MASK;
            pmap_kenter((vm_offset_t)ptvmmap, v);
            o = (int)uio->uio_offset & PAGE_MASK;
            c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
            c = min(c, (u_int)(PAGE_SIZE - o));
            c = min(c, (u_int)iov->iov_len);
            error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
            pmap_kremove((vm_offset_t)ptvmmap);
            continue;

        case 1: {
            /*
             * minor device 1 is kernel memory, /dev/kmem
             */
            vm_offset_t saddr, eaddr;
            int prot;

            c = iov->iov_len;

            /*
             * Make sure that all of the pages are currently
             * resident so that we don't create any zero-fill
             * pages.
             */
            saddr = trunc_page(uio->uio_offset);
            eaddr = round_page(uio->uio_offset + c);
            if (saddr > eaddr)
                return EFAULT;

            /*
             * Make sure the kernel addresses are mapped.
             * platform_direct_mapped() can be used to bypass
             * default mapping via the page table (virtual kernels
             * contain a lot of out-of-band data).
             */
            prot = VM_PROT_READ;
            if (uio->uio_rw != UIO_READ)
                prot |= VM_PROT_WRITE;
            error = kvm_access_check(saddr, eaddr, prot);
            if (error)
                return (error);
            error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
                (int)c, uio);
            continue;
        }
        case 2:
            /*
             * minor device 2 (/dev/null) is EOF/RATHOLE
             */
            if (uio->uio_rw == UIO_READ)
                return (0);
            c = iov->iov_len;
            break;
        case 3:
            /*
             * minor device 3 (/dev/random) is source of filth
             * on read, seeder on write
             */
            if (buf == NULL)
                buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
            c = min(iov->iov_len, PAGE_SIZE);
            if (uio->uio_rw == UIO_WRITE) {
                error = uiomove(buf, (int)c, uio);
                if (error == 0 && seedenable && securelevel <= 0) {
                    error = add_buffer_randomness_src(buf, c,
                        RAND_SRC_SEEDING);
                } else if (error == 0) {
                    error = EPERM;
                }
            } else {
                poolsize = read_random(buf, c);
                if (poolsize == 0) {
                    if (buf)
                        kfree(buf, M_TEMP);
                    if ((flags & IO_NDELAY) != 0)
                        return (EWOULDBLOCK);
                    return (0);
                }
                c = min(c, poolsize);
                error = uiomove(buf, (int)c, uio);
            }
            continue;
        case 4:
            /*
             * minor device 4 (/dev/urandom) is source of muck
             * on read, writes are disallowed.
             */
            c = min(iov->iov_len, PAGE_SIZE);
            if (uio->uio_rw == UIO_WRITE) {
                error = EPERM;
                break;
            }
            if (CURSIG(curthread->td_lwp) != 0) {
                /*
                 * Use tsleep() to get the error code right.
                 * It should return immediately.
                 */
                error = tsleep(&rand_bolt, PCATCH, "urand", 1);
                if (error != 0 && error != EWOULDBLOCK)
                    continue;
            }
            if (buf == NULL)
                buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
            poolsize = read_random_unlimited(buf, c);
            c = min(c, poolsize);
            error = uiomove(buf, (int)c, uio);
            continue;
        case 12:
            /*
             * minor device 12 (/dev/zero) is source of nulls
             * on read, write are disallowed.
             */
            if (uio->uio_rw == UIO_WRITE) {
                c = iov->iov_len;
                break;
            }
            if (zbuf == NULL) {
                zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
                    M_WAITOK | M_ZERO);
            }
            c = min(iov->iov_len, PAGE_SIZE);
            error = uiomove(zbuf, (int)c, uio);
            continue;
        default:
            return (ENODEV);
        }
        if (error)
            break;
        iov->iov_base = (char *)iov->iov_base + c;
        iov->iov_len -= c;
        uio->uio_offset += c;
        uio->uio_resid -= c;
    }
    if (buf)
        kfree(buf, M_TEMP);
    return (error);
}
static void
__guard_setup(void)
{
    /* Cannot report failure. */
    read_random(__stack_chk_guard, sizeof(__stack_chk_guard));
}
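The guard filled in here is what GCC/Clang's -fstack-protector compares on function exit. Conceptually, in a simplified sketch (not the compiler's generated code; the array-of-long shape of __stack_chk_guard is assumed from the BSD libc style implied above):

#include <stdlib.h>

extern long __stack_chk_guard[8];

/* What instrumented prologues/epilogues boil down to: copy the guard
 * onto the stack on entry, verify it is intact before returning. */
void example(void)
{
    long canary = __stack_chk_guard[0];
    char buf[64];

    (void)buf;                    /* ... function body ... */

    if (canary != __stack_chk_guard[0])
        abort();                  /* real code calls __stack_chk_fail() */
}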
static int
pread_f(int argc, char **argv)
{
    size_t bsize;
    off64_t offset;
    unsigned int zeed = 0;
    long long count, total, tmp;
    size_t fsblocksize, fssectsize;
    struct timeval t1, t2;
    char *sp;
    int Cflag, qflag, uflag, vflag;
    int eof = 0, direction = IO_FORWARD;
    int c;

    Cflag = qflag = uflag = vflag = 0;
    init_cvtnum(&fsblocksize, &fssectsize);
    bsize = fsblocksize;

    while ((c = getopt(argc, argv, "b:BCFRquvV:Z:")) != EOF) {
        switch (c) {
        case 'b':
            tmp = cvtnum(fsblocksize, fssectsize, optarg);
            if (tmp < 0) {
                printf(_("non-numeric bsize -- %s\n"), optarg);
                return 0;
            }
            bsize = tmp;
            break;
        case 'C':
            Cflag = 1;
            break;
        case 'F':
            direction = IO_FORWARD;
            break;
        case 'B':
            direction = IO_BACKWARD;
            break;
        case 'R':
            direction = IO_RANDOM;
            break;
        case 'q':
            qflag = 1;
            break;
        case 'u':
            uflag = 1;
            break;
        case 'v':
            vflag = 1;
            break;
#ifdef HAVE_PREADV
        case 'V':
            vectors = strtoul(optarg, &sp, 0);
            if (!sp || sp == optarg) {
                printf(_("non-numeric vector count == %s\n"), optarg);
                return 0;
            }
            break;
#endif
        case 'Z':
            zeed = strtoul(optarg, &sp, 0);
            if (!sp || sp == optarg) {
                printf(_("non-numeric seed -- %s\n"), optarg);
                return 0;
            }
            break;
        default:
            return command_usage(&pread_cmd);
        }
    }
    if (optind != argc - 2)
        return command_usage(&pread_cmd);

    offset = cvtnum(fsblocksize, fssectsize, argv[optind]);
    if (offset < 0 && (direction & (IO_RANDOM|IO_BACKWARD))) {
        eof = -1;    /* read from EOF */
    } else if (offset < 0) {
        printf(_("non-numeric length argument -- %s\n"), argv[optind]);
        return 0;
    }
    optind++;
    count = cvtnum(fsblocksize, fssectsize, argv[optind]);
    if (count < 0 && (direction & (IO_RANDOM|IO_FORWARD))) {
        eof = -1;    /* read to EOF */
    } else if (count < 0) {
        printf(_("non-numeric length argument -- %s\n"), argv[optind]);
        return 0;
    }

    if (alloc_buffer(bsize, uflag, 0xabababab) < 0)
        return 0;

    gettimeofday(&t1, NULL);
    switch (direction) {
    case IO_RANDOM:
        if (!zeed)    /* srandom seed */
            zeed = time(NULL);
        c = read_random(file->fd, offset, count, &total, zeed, eof);
        break;
    case IO_FORWARD:
        c = read_forward(file->fd, offset, count, &total, vflag, 0, eof);
        if (eof)
            count = total;
        break;
    case IO_BACKWARD:
        c = read_backward(file->fd, &offset, &count, &total, eof);
        break;
    default:
        ASSERT(0);
    }
    if (c < 0)
        return 0;
    if (qflag)
        return 0;
    gettimeofday(&t2, NULL);
    t2 = tsub(t2, t1);

    report_io_times("read", &t2, (long long)offset, count, total, c, Cflag);
    return 0;
}