static inline u_int l2_hash(struct l2t_data *d, const struct sockaddr *sa, int ifindex) { u_int hash, half = d->l2t_size / 2, start = 0; const void *key; size_t len; KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6, ("%s: sa %p has unexpected sa_family %d", __func__, sa, sa->sa_family)); if (sa->sa_family == AF_INET) { const struct sockaddr_in *sin = (const void *)sa; key = &sin->sin_addr; len = sizeof(sin->sin_addr); } else { const struct sockaddr_in6 *sin6 = (const void *)sa; key = &sin6->sin6_addr; len = sizeof(sin6->sin6_addr); start = half; } hash = fnv_32_buf(key, len, FNV1_32_INIT); hash = fnv_32_buf(&ifindex, sizeof(ifindex), hash); hash %= half; return (hash + start); }
/* ** Returns the filename we want to use. According to the convention, this ** can be the msgnumber, the msgid, or some other kind of name. */ char *message_name (struct emailinfo *email) { static char buffer[8 + sizeof (time_t) * 2 + 1]; #ifdef HAVE_LIBFNV if (set_nonsequential && email->msgid) { /* Call the FNV msg hash library */ Fnv32_t hash_val; hash_val = fnv_32_buf(email->msgid, strlen (email->msgid), FNV1_32_INIT); /* the line below is what we used before when the hash included the fromdate string, and we didn't concatenate the mail date. However, we changed strategies to avoid collisions. */ /* hash_val = fnv_32_str(email->fromdatestr, hash_val); */ sprintf (buffer, "%08x%08x", hash_val, email->fromdate); return buffer; } else { #endif /* HAVE_LIBFNV */ sprintf (buffer, "%.4d", email->msgnum); return buffer; #ifdef HAVE_LIBFNV } #endif /* HAVE_LIBFNV */ }
/*
 * andna_32bit_hash
 *
 * It returns the 32bit hash of the md5 hash of the `hname' string.
 */
u_int andna_32bit_hash(char *hname)
{
	u_char digest[ANDNA_HASH_SZ];

	/* First digest the hostname with MD5, then fold the digest
	 * down to 32 bits with FNV-1. */
	hash_md5((u_char *) hname, strlen(hname), digest);

	return fnv_32_buf(digest, ANDNA_HASH_SZ, FNV1_32_INIT);
}
/*
 * Map a key to a bucket index in the hashmap.  Case-insensitive lookups
 * hash an upper-cased view of the key via fnv_32_upper_buf().
 */
uint32_t hashmap_GetIndex(hashmap* h, const char* buf, size_t len, int ignorecase)
{
	uint32_t hash;

	hash = ignorecase ? fnv_32_upper_buf(buf, len)
			  : fnv_32_buf(buf, len);

	return hash % h->size;
}
/*
 * Record the minidump file offset for physical address `pa' in the
 * hashed page table, chaining new entries at the head of their bucket.
 */
static void
hpt_insert(kvm_t *kd, uint64_t pa, int64_t off)
{
	struct hpte *entry;
	uint32_t bucket;

	/* Bucket index: FNV-1 over the PA bytes, masked to the table size
	 * (HPT_SIZE is evidently a power of two). */
	bucket = fnv_32_buf(&pa, sizeof(pa), FNV1_32_INIT) & (HPT_SIZE - 1);

	/* NOTE(review): malloc() result is not checked; an allocation
	 * failure here would dereference NULL.  Preserved as-is to keep
	 * behavior identical -- confirm whether callers can tolerate a
	 * failure path. */
	entry = malloc(sizeof(*entry));
	entry->pa = pa;
	entry->off = off;

	entry->next = kd->vmst->hpt_head[bucket];
	kd->vmst->hpt_head[bucket] = entry;
}
/*
 * Look up the minidump file offset previously recorded for physical
 * address `pa'; returns -1 if the address is not in the table.
 */
static int64_t
hpt_find(kvm_t *kd, uint64_t pa)
{
	struct hpte *entry;
	uint32_t bucket;

	/* Same bucket computation as hpt_insert(). */
	bucket = fnv_32_buf(&pa, sizeof(pa), FNV1_32_INIT) & (HPT_SIZE - 1);

	for (entry = kd->vmst->hpt_head[bucket]; entry != NULL;
	    entry = entry->next) {
		if (entry->pa == pa)
			return (entry->off);
	}

	return (-1);
}
/*
 * Calculates a hash key for a (ip1, port1)<->(ip2, port2) tcp session.
 *
 * The endpoints are put into canonical order before hashing so that both
 * directions of the same session produce the same key.
 *
 * Bug fix: the original ordered the endpoints by IP only, so when
 * ip1 == ip2 (e.g. a loopback connection) the two directions of one
 * session hashed differently.  The ports are now used as a tie-breaker;
 * for ip1 != ip2 the result is identical to the previous code.
 */
static uint32_t getTcpSessionHash( uint32_t ip1, uint16_t port1, uint32_t ip2, uint16_t port2 )
{
	uint32_t hash;

	/* Canonicalize: smaller (ip, port) pair goes first. */
	if( ip1 > ip2 || (ip1 == ip2 && port1 > port2) )
	{
		uint32_t tmp_ip = ip1;
		uint16_t tmp_port = port1;

		ip1 = ip2; port1 = port2;
		ip2 = tmp_ip; port2 = tmp_port;
	}

	hash = fnv_32_buf( &ip1, sizeof(ip1), FNV1_32_INIT );
	hash = fnv_32_buf( &port1, sizeof(port1), hash );
	hash = fnv_32_buf( &ip2, sizeof(ip2), hash );
	hash = fnv_32_buf( &port2, sizeof(port2), hash );

	return hash;
}
/* 32-bit FNV-1 hash over the raw bytes of a 64-bit fuse node id. */
static uint32_t __inline
fuse_vnode_hash(uint64_t id)
{
	uint32_t hash;

	hash = fnv_32_buf(&id, sizeof(id), FNV1_32_INIT);
	return (hash);
}
/*
 * Fold `inputLen' bytes of `input' into the running 32-bit FNV state.
 * The trailing 1 selects the FNV-1a (xor-then-multiply) variant of
 * fnv_32_buf().
 */
PHP_HASH_API void PHP_FNV1a32Update(PHP_FNV132_CTX *context, const unsigned char *input, unsigned int inputLen)
{
	Fnv32_t state = context->state;

	context->state = fnv_32_buf((void *)input, inputLen, state, 1);
}
/*
 * Hash an SSL session id buffer for the session-id cache.
 *
 * Bug fix: the original passed sizeof(DSSL_SESSION_ID_SIZE) as the
 * length, i.e. the size of the integer constant (typically 4 bytes),
 * not the session id length itself -- so most of the id was ignored
 * by the hash.  Hash the full DSSL_SESSION_ID_SIZE bytes instead.
 */
static uint32_t GetSessionIDCache( u_char* session_id )
{
	return fnv_32_buf( session_id, DSSL_SESSION_ID_SIZE, FNV1_32_INIT );
}
/*
 * test_fnv32 - test the FNV32 hash
 *
 * given:
 *	hash_type	type of FNV hash to test
 *	init_hval	initial hash value
 *	mask		lower bit mask
 *	v_flag		1 => print test failure info on stderr
 *	code		0 ==> generate FNV test vectors
 *			1 ==> validate against FNV test vectors
 *
 * returns:	0 ==> OK, else test vector failure number
 *
 * Fixes relative to the previous revision:
 *   - the "expected ... != generated ..." diagnostics printed the
 *     generated value in the expected slot and vice versa; the
 *     arguments are now in the order the message states,
 *   - Fnv32_t values passed to %08lx are now cast to unsigned long
 *     (format/argument mismatch on LP64 platforms otherwise),
 *   - the validate switch gained a default arm (hash_type was already
 *     validated by the compute switch, so it is unreachable).
 */
static int
test_fnv32(enum fnv_type hash_type, Fnv32_t init_hval, Fnv32_t mask, int v_flag, int code)
{
    struct test_vector *t;	/* FNV test vestor */
    Fnv32_t hval;		/* current hash value */
    int tstnum;			/* test vector that failed, starting at 1 */

    /*
     * print preamble if generating test vectors
     */
    if (code == 0) {
	switch (hash_type) {
	case FNV0_32:
	    printf("struct fnv0_32_test_vector fnv0_32_vector[] = {\n");
	    break;
	case FNV1_32:
	    printf("struct fnv1_32_test_vector fnv1_32_vector[] = {\n");
	    break;
	case FNV1a_32:
	    printf("struct fnv1a_32_test_vector fnv1a_32_vector[] = {\n");
	    break;
	default:
	    unknown_hash_type(program, hash_type, 12);	/* exit(12) */
	    /*NOTREACHED*/
	}
    }

    /*
     * loop thru all test vectors
     */
    for (t = fnv_test_str, tstnum = 1; t->buf != NULL; ++t, ++tstnum) {

	/*
	 * compute the FNV hash
	 */
	hval = init_hval;
	switch (hash_type) {
	case FNV0_32:
	case FNV1_32:
	    hval = fnv_32_buf(t->buf, t->len, hval);
	    break;
	case FNV1a_32:
	    hval = fnv_32a_buf(t->buf, t->len, hval);
	    break;
	default:
	    unknown_hash_type(program, hash_type, 13);	/* exit(13) */
	    /*NOTREACHED*/
	}

	/*
	 * print the vector
	 */
	switch (code) {
	case 0:	/* generate the test vector */
	    printf(" { &fnv_test_str[%d], (Fnv32_t) 0x%08lxUL },\n",
		   tstnum-1, (unsigned long)(hval & mask));
	    break;

	case 1:	/* validate against test vector */
	    switch (hash_type) {
	    case FNV0_32:
		if ((hval&mask) != (fnv0_32_vector[tstnum-1].fnv0_32 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv0_32 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx != generated: 0x%08lx\n",
				program,
				(unsigned long)(fnv0_32_vector[tstnum-1].fnv0_32 & mask),
				(unsigned long)(hval&mask));
		    }
		    return tstnum;
		}
		break;
	    case FNV1_32:
		if ((hval&mask) != (fnv1_32_vector[tstnum-1].fnv1_32 & mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1_32 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx != generated: 0x%08lx\n",
				program,
				(unsigned long)(fnv1_32_vector[tstnum-1].fnv1_32 & mask),
				(unsigned long)(hval&mask));
		    }
		    return tstnum;
		}
		break;
	    case FNV1a_32:
		if ((hval&mask) != (fnv1a_32_vector[tstnum-1].fnv1a_32 &mask)) {
		    if (v_flag) {
			fprintf(stderr, "%s: failed fnv1a_32 test # %d\n",
				program, tstnum);
			fprintf(stderr, "%s: test # 1 is 1st test\n", program);
			fprintf(stderr,
				"%s: expected 0x%08lx != generated: 0x%08lx\n",
				program,
				(unsigned long)(fnv1a_32_vector[tstnum-1].fnv1a_32 & mask),
				(unsigned long)(hval&mask));
		    }
		    return tstnum;
		}
		break;
	    default:
		/* unreachable: hash_type was validated above */
		break;
	    }
	    break;

	default:
	    fprintf(stderr, "%s: -m %d not implemented yet\n", program, code);
	    exit(14);
	}
    }

    /*
     * print completion if generating test vectors
     */
    if (code == 0) {
	printf(" { NULL, 0 }\n");
	printf("};\n");
    }

    /*
     * no failures, return code 0 ==> all OK
     */
    return 0;
}
/*
 * 32-bit FNV-1 hash of an smbfs name.  `name' need not be
 * NUL-terminated; exactly `nmlen' bytes are hashed.
 */
u_int32_t __inline
smbfs_hash(const u_char *name, int nmlen)
{
	u_int32_t hash;

	hash = fnv_32_buf(name, nmlen, FNV1_32_INIT);
	return (hash);
}
/*
 * Sysctl handler: check a shared object path against the jail's
 * Integriforce rules.  Reads an integriforce_so_check_t (path in),
 * looks up the matching rule by (jail id, rule type, file id, mount
 * point) hash, runs do_integriforce_check(), and writes the same
 * structure back with isc_result filled in.
 */
static int
sysctl_integriforce_so(SYSCTL_HANDLER_ARGS)
{
	integriforce_so_check_t *integriforce_so;
	secadm_prison_entry_t *entry;
	secadm_rule_t r, *rule;
	struct nameidata nd;
	struct vattr vap;
	secadm_key_t key;
	int err;

	/* Both directions must supply exactly one check structure. */
	if (!(req->newptr) || req->newlen != sizeof(integriforce_so_check_t))
		return (EINVAL);

	if (!(req->oldptr) || req->oldlen != sizeof(integriforce_so_check_t))
		return (EINVAL);

	integriforce_so = malloc(sizeof(integriforce_so_check_t), M_SECADM,
	    M_WAITOK);

	err = SYSCTL_IN(req, integriforce_so,
	    sizeof(integriforce_so_check_t));
	if (err) {
		free(integriforce_so, M_SECADM);
		return (err);
	}

	/* Resolve the user-supplied path to a vnode. */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, integriforce_so->isc_path,
	    req->td);
	err = namei(&nd);
	if (err) {
		free(integriforce_so, M_SECADM);
		NDFREE(&nd, 0);
		return (err);
	}

	if ((err = vn_lock(nd.ni_vp, LK_SHARED | LK_RETRY)) != 0) {
		free(integriforce_so, M_SECADM);
		NDFREE(&nd, 0);
		return (err);
	}

	/* File attributes (va_fileid) identify the file in the rule key. */
	err = VOP_GETATTR(nd.ni_vp, &vap, req->td->td_ucred);
	if (err) {
		free(integriforce_so, M_SECADM);
		NDFREE(&nd, 0);
		return (err);
	}

	VOP_UNLOCK(nd.ni_vp, 0);

	/*
	 * Build the rule lookup key and hash it the same way rule
	 * insertion does, then search this jail's rule tree.
	 *
	 * NOTE(review): `key' is hashed as raw bytes but is never
	 * zeroed first -- if secadm_key_t contains padding, the hash
	 * covers uninitialized bytes.  Presumably the insertion path
	 * builds the key identically (or the struct has no padding);
	 * confirm, otherwise lookups can miss.
	 */
	key.sk_jid = req->td->td_ucred->cr_prison->pr_id;
	key.sk_type = secadm_integriforce_rule;
	key.sk_fileid = vap.va_fileid;
	strncpy(key.sk_mntonname,
	    nd.ni_vp->v_mount->mnt_stat.f_mntonname, MNAMELEN);

	r.sr_key = fnv_32_buf(&key, sizeof(secadm_key_t), FNV1_32_INIT);

	entry = get_prison_list_entry(
	    req->td->td_ucred->cr_prison->pr_id);

	PE_RLOCK(entry);
	rule = RB_FIND(secadm_rules_tree, &(entry->sp_rules), &r);

	if (rule) {
		/* Rule found: perform the actual integrity check. */
		integriforce_so->isc_result = do_integriforce_check(rule,
		    &vap, nd.ni_vp, req->td->td_ucred);
	}

	PE_RUNLOCK(entry);

	/* NOTE(review): SYSCTL_OUT return value is ignored here. */
	SYSCTL_OUT(req, integriforce_so, sizeof(integriforce_so_check_t));
	free(integriforce_so, M_SECADM);

	NDFREE(&nd, 0);

	return (0);
}
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * The file handle is hashed with FNV-1 and looked up via vfs_hash; on a
 * miss a new nfsnode/vnode pair is allocated, initialized, and inserted.
 * If another thread wins the insertion race, the existing node is
 * returned instead (vfs_hash_insert disposes of the loser).
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	/* Hash the raw file handle bytes to pick the vfs_hash chain. */
	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;

	/* Fast path: file handle already has a vnode. */
	error = vfs_hash_get(mntp, hash, flags, td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert happened
	 * to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/* Large handles get a separate allocation; small ones are inline. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		/* Undo the allocations; insmntque failure cleaned up vp. */
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, flags, td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;
	return (0);
}
int nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp) { struct nfsnode *np, *np2; struct nfsnodehashhead *nhpp; struct vnode *vp; int error; int lkflags; struct nfsmount *nmp; /* * Calculate nfs mount point and figure out whether the rslock should * be interruptable or not. */ nmp = VFSTONFS(mntp); if (nmp->nm_flag & NFSMNT_INT) lkflags = LK_PCATCH; else lkflags = 0; lwkt_gettoken(&nfsnhash_token); retry: nhpp = NFSNOHASH(fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT)); loop: for (np = nhpp->lh_first; np; np = np->n_hash.le_next) { if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize || bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) { continue; } vp = NFSTOV(np); if (vget(vp, LK_EXCLUSIVE)) goto loop; for (np = nhpp->lh_first; np; np = np->n_hash.le_next) { if (mntp == NFSTOV(np)->v_mount && np->n_fhsize == fhsize && bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize) == 0 ) { break; } } if (np == NULL || NFSTOV(np) != vp) { vput(vp); goto loop; } *npp = np; lwkt_reltoken(&nfsnhash_token); return(0); } /* * Obtain a lock to prevent a race condition if the getnewvnode() * or MALLOC() below happens to block. */ if (lockmgr(&nfsnhash_lock, LK_EXCLUSIVE | LK_SLEEPFAIL)) goto loop; /* * Allocate before getnewvnode since doing so afterward * might cause a bogus v_data pointer to get dereferenced * elsewhere if objcache should block. */ np = objcache_get(nfsnode_objcache, M_WAITOK); error = getnewvnode(VT_NFS, mntp, &vp, 0, 0); if (error) { lockmgr(&nfsnhash_lock, LK_RELEASE); *npp = NULL; objcache_put(nfsnode_objcache, np); lwkt_reltoken(&nfsnhash_token); return (error); } /* * Initialize most of (np). */ bzero(np, sizeof (*np)); if (fhsize > NFS_SMALLFH) { MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK); } else {
/* * Look up a vnode/nfsnode by file handle. * Callers must check for mount points!! * In all cases, a pointer to a * nfsnode structure is returned. * This variant takes a "struct nfsfh *" as second argument and uses * that structure up, either by hanging off the nfsnode or FREEing it. */ int nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp, struct componentname *cnp, struct thread *td, struct nfsnode **npp, void *stuff, int lkflags) { struct nfsnode *np, *dnp; struct vnode *vp, *nvp; struct nfsv4node *newd, *oldd; int error; u_int hash; struct nfsmount *nmp; nmp = VFSTONFS(mntp); dnp = VTONFS(dvp); *npp = NULL; hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT); error = vfs_hash_get(mntp, hash, lkflags, td, &nvp, newnfs_vncmpf, nfhp); if (error == 0 && nvp != NULL) { /* * I believe there is a slight chance that vgonel() could * get called on this vnode between when NFSVOPLOCK() drops * the VI_LOCK() and vget() acquires it again, so that it * hasn't yet had v_usecount incremented. If this were to * happen, the VI_DOOMED flag would be set, so check for * that here. Since we now have the v_usecount incremented, * we should be ok until we vrele() it, if the VI_DOOMED * flag isn't set now. */ VI_LOCK(nvp); if ((nvp->v_iflag & VI_DOOMED)) { VI_UNLOCK(nvp); vrele(nvp); error = ENOENT; } else { VI_UNLOCK(nvp); } } if (error) { FREE((caddr_t)nfhp, M_NFSFH); return (error); } if (nvp != NULL) { np = VTONFS(nvp); /* * For NFSv4, check to see if it is the same name and * replace the name, if it is different. 
*/ oldd = newd = NULL; if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL && nvp->v_type == VREG && (np->n_v4->n4_namelen != cnp->cn_namelen || NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), cnp->cn_namelen) || dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, dnp->n_fhp->nfh_len))) { MALLOC(newd, struct nfsv4node *, sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len + + cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK); NFSLOCKNODE(np); if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG && (np->n_v4->n4_namelen != cnp->cn_namelen || NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), cnp->cn_namelen) || dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, dnp->n_fhp->nfh_len))) { oldd = np->n_v4; np->n_v4 = newd; newd = NULL; np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len; np->n_v4->n4_namelen = cnp->cn_namelen; NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, dnp->n_fhp->nfh_len); NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), cnp->cn_namelen); } NFSUNLOCKNODE(np); }
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * The handle is FNV-1 hashed for the vfs_hash lookup; on a miss a new
 * nfsnode/vnode pair is allocated and inserted, and the root vnode flags
 * are set when the handle matches the mount's root file handle.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp, int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	/* Hash the raw handle bytes for the vfs_hash chain. */
	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	/* Temporary nfsfh used only as the comparison key for the lookup. */
	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	/* Permanent copy of the handle, hung off the nfsnode. */
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		/* Undo the allocations; insmntque failure cleaned up vp. */
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;
	return (0);
}
/* * main - the main function * * See the above usage for details. */ int main(int argc, char *argv[]) { char buf[BUF_SIZE+1]; /* read buffer */ int readcnt; /* number of characters written */ Fnv32_t hval; /* current hash value */ int s_flag = 0; /* 1 => -s was given, hash args as strings */ int m_flag = 0; /* 1 => print multiple hashes, one per arg */ int v_flag = 0; /* 1 => verbose hash print */ int b_flag = WIDTH; /* -b flag value */ int t_flag = -1; /* FNV test vector code (0=>print, 1=>test) */ enum fnv_type hash_type = FNV_NONE; /* type of FNV hash to perform */ Fnv32_t bmask; /* mask to apply to output */ extern char *optarg; /* option argument */ extern int optind; /* argv index of the next arg */ int fd; /* open file to process */ char *p; int i; /* * parse args */ program = argv[0]; while ((i = getopt(argc, argv, "b:mst:v")) != -1) { switch (i) { case 'b': /* bcnt bit mask count */ b_flag = atoi(optarg); break; case 'm': /* print multiple hashes, one per arg */ m_flag = 1; break; case 's': /* hash args as strings */ s_flag = 1; break; case 't': /* FNV test vector code */ t_flag = atoi(optarg); if (t_flag < 0 || t_flag > 1) { fprintf(stderr, "%s: -t code must be 0 or 1\n", program); fprintf(stderr, usage, program, FNV_VERSION); exit(1); } m_flag = 1; break; case 'v': /* verbose hash print */ m_flag = 1; v_flag = 1; break; default: fprintf(stderr, usage, program, FNV_VERSION); exit(1); } } /* -t code incompatible with -b, -m and args */ if (t_flag >= 0) { if (b_flag != WIDTH) { fprintf(stderr, "%s: -t code incompatible with -b\n", program); exit(2); } if (s_flag != 0) { fprintf(stderr, "%s: -t code incompatible with -s\n", program); exit(3); } if (optind < argc) { fprintf(stderr, "%s: -t code incompatible args\n", program); exit(4); } } /* -s requires at least 1 arg */ if (s_flag && optind >= argc) { fprintf(stderr, usage, program, FNV_VERSION); exit(5); } /* limit -b values */ if (b_flag < 0 || b_flag > WIDTH) { fprintf(stderr, "%s: -b bcnt: %d must be >= 
0 and < %d\n", program, b_flag, WIDTH); exit(6); } if (b_flag == WIDTH) { bmask = (Fnv32_t)0xffffffff; } else { bmask = (Fnv32_t)((1 << b_flag) - 1); } /* * start with the initial basis depending on the hash type */ p = strrchr(program, '/'); if (p == NULL) { p = program; } else { ++p; } if (strcmp(p, "fnv032") == 0) { /* using non-recommended FNV-0 and zero initial basis */ hval = FNV0_32_INIT; hash_type = FNV0_32; } else if (strcmp(p, "fnv132") == 0) { /* using FNV-1 and non-zero initial basis */ hval = FNV1_32_INIT; hash_type = FNV1_32; } else if (strcmp(p, "fnv1a32") == 0) { /* start with the FNV-1a initial basis */ hval = FNV1_32A_INIT; hash_type = FNV1a_32; } else { fprintf(stderr, "%s: unknown program name, unknown hash type\n", program); exit(7); } /* * FNV test vector processing, if needed */ if (t_flag >= 0) { int code; /* test vector that failed, starting at 1 */ /* * perform all tests */ code = test_fnv32(hash_type, hval, bmask, v_flag, t_flag); /* * evaluate the tests */ if (code == 0) { if (v_flag) { printf("passed\n"); } exit(0); } else { printf("failed vector (1 is 1st test): %d\n", code); exit(8); } } /* * string hashing */ if (s_flag) { /* hash any other strings */ for (i=optind; i < argc; ++i) { switch (hash_type) { case FNV0_32: case FNV1_32: hval = fnv_32_str(argv[i], hval); break; case FNV1a_32: hval = fnv_32a_str(argv[i], hval); break; default: unknown_hash_type(program, hash_type, 9); /* exit(9) */ /*NOTREACHED*/ } if (m_flag) { print_fnv32(hval, bmask, v_flag, argv[i]); } } /* * file hashing */ } else { /* * case: process only stdin */ if (optind >= argc) { /* case: process only stdin */ while ((readcnt = read(0, buf, BUF_SIZE)) > 0) { switch (hash_type) { case FNV0_32: case FNV1_32: hval = fnv_32_buf(buf, readcnt, hval); break; case FNV1a_32: hval = fnv_32a_buf(buf, readcnt, hval); default: unknown_hash_type(program, hash_type, 10); /* exit(10) */ /*NOTREACHED*/ } } if (m_flag) { print_fnv32(hval, bmask, v_flag, "(stdin)"); } } else { /* * 
process any other files */ for (i=optind; i < argc; ++i) { /* open the file */ fd = open(argv[i], O_RDONLY); if (fd < 0) { fprintf(stderr, "%s: unable to open file: %s\n", program, argv[i]); exit(4); } /* hash the file */ while ((readcnt = read(fd, buf, BUF_SIZE)) > 0) { switch (hash_type) { case FNV0_32: case FNV1_32: hval = fnv_32_buf(buf, readcnt, hval); break; case FNV1a_32: hval = fnv_32a_buf(buf, readcnt, hval); default: unknown_hash_type(program, hash_type, 11);/* exit(11) */ /*NOTREACHED*/ } } /* finish processing the file */ if (m_flag) { print_fnv32(hval, bmask, v_flag, argv[i]); } close(fd); } } } /* * report hash and exit */ if (!m_flag) { print_fnv32(hval, bmask, v_flag, ""); } return 0; /* exit(0); */ }