void
rx_upcall(socket_t so, void *arg, __unused int waitflag)
{
    mbuf_t m;
    int error = 0;
    int i, flags = 0;
    struct msghdr msg;
    struct sockaddr_storage ss;
    struct sockaddr *sa = NULL;
    struct sockaddr_in from;
    struct rx_packet *p;
    afs_int32 rlen;
    afs_int32 tlen;
    afs_int32 savelen;		/* was using rlen but had aliasing problems */
    size_t nbytes, resid, noffset;

    p = rxi_AllocPacket(RX_PACKET_CLASS_RECEIVE);
    rx_computelen(p, tlen);
    rx_SetDataSize(p, tlen);	/* this is the size of the user data area */
    tlen += RX_HEADER_SIZE;	/* now this is the size of the entire packet */
    rlen = rx_maxJumboRecvSize;	/* this is what I am advertising.  Only check
				 * it once in order to avoid races. */
    tlen = rlen - tlen;
    if (tlen > 0) {
	tlen = rxi_AllocDataBuf(p, tlen, RX_PACKET_CLASS_RECV_CBUF);
	if (tlen > 0) {
	    tlen = rlen - tlen;
	} else
	    tlen = rlen;
    } else
	tlen = rlen;

    /* add some padding to the last iovec, it's just to make sure that the
     * read doesn't return more data than we expect, and is done to get around
     * our problems caused by the lack of a length field in the rx header. */
    savelen = p->wirevec[p->niovecs - 1].iov_len;
    p->wirevec[p->niovecs - 1].iov_len = savelen + RX_EXTRABUFFERSIZE;

    resid = nbytes = tlen + sizeof(afs_int32);

    memset(&msg, 0, sizeof(struct msghdr));
    msg.msg_name = &ss;
    msg.msg_namelen = sizeof(struct sockaddr_storage);
    sa = (struct sockaddr *)&ss;

    do {
	m = NULL;
	error = sock_receivembuf(so, &msg, &m, MSG_DONTWAIT, &nbytes);
	if (!error) {
	    size_t sz, offset = 0;
	    noffset = 0;
	    resid = nbytes;
	    /* copy the received mbuf chain into the packet's iovecs */
	    for (i = 0; i < p->niovecs && resid; i++) {
		sz = MIN(resid, p->wirevec[i].iov_len);
		error = mbuf_copydata(m, offset, sz, p->wirevec[i].iov_base);
		if (error)
		    break;
		resid -= sz;
		offset += sz;
		noffset += sz;
	    }
	}
    } while (0);

    mbuf_freem(m);

    /* restore the vec to its correct state */
    p->wirevec[p->niovecs - 1].iov_len = savelen;

    if (error == EWOULDBLOCK && noffset > 0)
	error = 0;

    if (!error) {
	int host, port;

	nbytes -= resid;

	if (sa->sa_family == AF_INET)
	    from = *(struct sockaddr_in *)sa;

	p->length = nbytes - RX_HEADER_SIZE;
	if ((nbytes > tlen) || (p->length & 0x8000)) {	/* Bogus packet */
	    if (nbytes <= 0) {
		if (rx_stats_active) {
		    MUTEX_ENTER(&rx_stats_mutex);
		    rx_atomic_inc(&rx_stats.bogusPacketOnRead);
		    rx_stats.bogusHost = from.sin_addr.s_addr;
		    MUTEX_EXIT(&rx_stats_mutex);
		}
		dpf(("B: bogus packet from [%x,%d] nb=%d",
		     from.sin_addr.s_addr, from.sin_port, nbytes));
	    }
	    return;
	} else {
	    /* Extract packet header. */
	    rxi_DecodePacketHeader(p);

	    host = from.sin_addr.s_addr;
	    port = from.sin_port;
	    if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
		if (rx_stats_active) {
		    rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
		}
	    }

#ifdef RX_TRIMDATABUFS
	    /* Free any empty packet buffers at the end of this packet */
	    rxi_TrimDataBufs(p, 1);
#endif
	    /* receive packet */
	    p = rxi_ReceivePacket(p, so, host, port, 0, 0);
	}
    }
    /* free packet? */
    if (p)
	rxi_FreePacket(p);

    return;
}
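/*
 * Context sketch (not part of the OpenAFS sources shown here): rx_upcall has
 * the Darwin sock_upcall signature, so it is attached to the kernel UDP
 * socket at creation time and invoked whenever data is queued on that socket.
 * The helper below is a minimal, hypothetical illustration of that
 * registration using the XNU socket KPI; the function name make_rx_socket and
 * the error handling are assumptions, and the real socket setup lives
 * elsewhere in the tree.
 */
#include <sys/kpi_socket.h>
#include <netinet/in.h>

static errno_t
make_rx_socket(u_short port, socket_t *sop)
{
    struct sockaddr_in addr;
    errno_t err;

    /* Register rx_upcall as the read notification; the cookie (here NULL)
     * is delivered back to the upcall as its "arg" parameter. */
    err = sock_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP,
		      rx_upcall, /* cookie */ NULL, sop);
    if (err)
	return err;

    memset(&addr, 0, sizeof(addr));
    addr.sin_len = sizeof(addr);
    addr.sin_family = AF_INET;
    addr.sin_port = port;	/* assumed already in network byte order */
    addr.sin_addr.s_addr = INADDR_ANY;

    err = sock_bind(*sop, (struct sockaddr *)&addr);
    if (err)
	sock_close(*sop);
    return err;
}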
int
rxkad_GetResponse(struct rx_securityClass *aobj, struct rx_connection *aconn,
		  struct rx_packet *apacket)
{
    struct rxkad_cprivate *tcp;
    char *tp;
    int v2;			/* whether server is old style or v2 */
    afs_int32 challengeID;
    rxkad_level level;
    char *response;
    int responseSize, missing;
    struct rxkad_v2ChallengeResponse r_v2;
    struct rxkad_oldChallengeResponse r_old;

    tcp = (struct rxkad_cprivate *)aobj->privateData;

    if (!(tcp->type & rxkad_client))
	return RXKADINCONSISTENCY;

    v2 = (rx_Contiguous(apacket) > sizeof(struct rxkad_oldChallenge));
    tp = rx_DataOf(apacket);

    if (v2) {			/* v2 challenge */
	struct rxkad_v2Challenge *c_v2;
	if (rx_GetDataSize(apacket) < sizeof(struct rxkad_v2Challenge))
	    return RXKADPACKETSHORT;
	c_v2 = (struct rxkad_v2Challenge *)tp;
	challengeID = ntohl(c_v2->challengeID);
	level = ntohl(c_v2->level);
    } else {			/* old format challenge */
	struct rxkad_oldChallenge *c_old;
	if (rx_GetDataSize(apacket) < sizeof(struct rxkad_oldChallenge))
	    return RXKADPACKETSHORT;
	c_old = (struct rxkad_oldChallenge *)tp;
	challengeID = ntohl(c_old->challengeID);
	level = ntohl(c_old->level);
    }

    if (level > tcp->level)
	return RXKADLEVELFAIL;
    INC_RXKAD_STATS(challenges[rxkad_LevelIndex(tcp->level)]);

    if (v2) {
	int i;
	afs_uint32 xor[2];

	memset((void *)&r_v2, 0, sizeof(r_v2));
	r_v2.version = htonl(RXKAD_CHALLENGE_PROTOCOL_VERSION);
	r_v2.spare = 0;
	(void)rxkad_SetupEndpoint(aconn, &r_v2.encrypted.endpoint);
	(void)rxi_GetCallNumberVector(aconn, r_v2.encrypted.callNumbers);
	for (i = 0; i < RX_MAXCALLS; i++) {
	    if (r_v2.encrypted.callNumbers[i] < 0)
		return RXKADINCONSISTENCY;
	    r_v2.encrypted.callNumbers[i] =
		htonl(r_v2.encrypted.callNumbers[i]);
	}
	r_v2.encrypted.incChallengeID = htonl(challengeID + 1);
	r_v2.encrypted.level = htonl((afs_int32) tcp->level);
	r_v2.kvno = htonl(tcp->kvno);
	r_v2.ticketLen = htonl(tcp->ticketLen);
	r_v2.encrypted.endpoint.cksum = rxkad_CksumChallengeResponse(&r_v2);
	memcpy((void *)xor, (void *)tcp->ivec, 2 * sizeof(afs_int32));
	fc_cbc_encrypt(&r_v2.encrypted, &r_v2.encrypted,
		       sizeof(r_v2.encrypted), tcp->keysched, xor, ENCRYPT);
	response = (char *)&r_v2;
	responseSize = sizeof(r_v2);
    } else {
	memset((void *)&r_old, 0, sizeof(r_old));
	r_old.encrypted.incChallengeID = htonl(challengeID + 1);
	r_old.encrypted.level = htonl((afs_int32) tcp->level);
	r_old.kvno = htonl(tcp->kvno);
	r_old.ticketLen = htonl(tcp->ticketLen);
	fc_ecb_encrypt(&r_old.encrypted, &r_old.encrypted, tcp->keysched,
		       ENCRYPT);
	response = (char *)&r_old;
	responseSize = sizeof(r_old);
    }

    if (RX_MAX_PACKET_DATA_SIZE < responseSize + tcp->ticketLen)
	return RXKADPACKETSHORT;	/* not enough space */

    rx_computelen(apacket, missing);
    missing = responseSize + tcp->ticketLen - missing;
    if (missing > 0)
	if (rxi_AllocDataBuf(apacket, missing, RX_PACKET_CLASS_SEND) > 0)
	    return RXKADPACKETSHORT;	/* not enough space */

    /* copy response and ticket into packet */
    rx_packetwrite(apacket, 0, responseSize, response);
    rx_packetwrite(apacket, responseSize, tcp->ticketLen, tcp->ticket);
    rx_SetDataSize(apacket, responseSize + tcp->ticketLen);
    return 0;
}
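/*
 * Layout sketch (illustrative only, not OpenAFS code): rxkad_GetResponse
 * leaves the packet data area as [fixed-size response struct][ticket bytes],
 * with the ticket length carried in network byte order inside the response.
 * The helper below shows, under that assumption, how a reader could split
 * the two pieces back apart with rx_packetread; the name split_v2_response,
 * its parameters, and its error handling are hypothetical.
 */
static int
split_v2_response(struct rx_packet *apacket,
		  struct rxkad_v2ChallengeResponse *r,
		  char *ticket, int maxTicketLen)
{
    afs_int32 ticketLen;

    if (rx_GetDataSize(apacket) < (int)sizeof(*r))
	return RXKADPACKETSHORT;

    /* offset 0 holds the fixed-size response written by rxkad_GetResponse */
    rx_packetread(apacket, 0, sizeof(*r), (char *)r);

    ticketLen = ntohl(r->ticketLen);
    if (ticketLen > maxTicketLen
	|| rx_GetDataSize(apacket) < (int)sizeof(*r) + ticketLen)
	return RXKADPACKETSHORT;

    /* the ticket immediately follows the response struct */
    rx_packetread(apacket, sizeof(*r), ticketLen, ticket);
    return 0;
}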