/*
 * r p c _ _ m e m _ r e a l l o c
 *
 * Instrumented wrapper around the inline realloc macro; returns the
 * (possibly relocated) block address.
 */
PRIVATE pointer_t rpc__mem_realloc
(
    pointer_t addr,
    unsigned32 size,
    unsigned32 type,
    unsigned32 flags ATTRIBUTE_UNUSED
)
{
    RPC_MEM_REALLOC_IL(addr, pointer_t, size, type, flags);

#ifdef DEBUG
    {
        /* Trace loudly (level 1) when this allocation type is the one being
         * watched via the mem_type debug switch; otherwise trace quietly. */
        int dbg_level =
            ((type & 0xff) == rpc_g_dbg_switches[rpc_es_dbg_mem_type]) ? 1 : 10;

        RPC_DBG_PRINTF(rpc_e_dbg_mem, dbg_level,
            ("(rpc__mem_realloc) type %x - %x @ %x\n", type, size, addr));
    }
#endif

    return addr;
}
/****************************************************************************
 makes lm and nt OWF crypts

 NTLMv2 variant: derives the LMv2/NTv2 responses and the session key from
 the server challenge plus freshly generated client challenges.
 pwd->smb_nt_pwd must already hold the NT password hash on entry.
 ****************************************************************************/
void pwd_make_lm_nt_owf2(struct pwd_info *pwd, const uchar srv_key[8],
                const char *user, const char *server, const char *domain,
                uchar sess_key[16])
{
    uchar kr[16];   /* NTLMv2 key: OWF of (NT hash, user, domain) */

    RPC_DBG_PRINTF(rpc_e_dbg_auth, 10,
        ("pwd_make_lm_nt_owf2: user %s, srv %s, dom %s\n",
         user, server, domain));

    /* Generate the client-side challenges (LM is fixed 8 bytes; NT challenge
     * length is returned in nt_cli_chal_len). */
    SMBgenclientchals(pwd->lm_cli_chal,
                      pwd->nt_cli_chal, &pwd->nt_cli_chal_len,
                      server, domain);

    /* Derive the NTLMv2 key from the stored NT hash, user and domain. */
    ntv2_owf_gen(pwd->smb_nt_pwd, user, domain, kr);

    /* lm # — LMv2 response: HMAC over server + client challenge, with the
     * 8-byte client challenge appended after the 16-byte MAC. */
    SMBOWFencrypt_ntv2(kr, srv_key, 8,
                       pwd->lm_cli_chal, 8,
                       pwd->smb_lm_owf);
    memcpy(&pwd->smb_lm_owf[16], pwd->lm_cli_chal, 8);

    /* nt # — NTv2 response, same shape but with the variable-length NT
     * client challenge appended. */
    SMBOWFencrypt_ntv2(kr, srv_key, 8,
                       pwd->nt_cli_chal, pwd->nt_cli_chal_len,
                       pwd->smb_nt_owf);
    memcpy(&pwd->smb_nt_owf[16], pwd->nt_cli_chal, pwd->nt_cli_chal_len);
    pwd->nt_owf_len = pwd->nt_cli_chal_len + 16;

    /* Session key is derived from the NTv2 key and the NT response MAC. */
    SMBsesskeygen_ntv2(kr, pwd->smb_nt_owf, sess_key);

#ifdef DEBUG_PASSWORD
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("server cryptkey: "));
    dump_data(20, srv_key, 8);
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("client lmv2 cryptkey: "));
    dump_data(20, pwd->lm_cli_chal, 8);
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("client ntv2 cryptkey: "));
    dump_data(20, pwd->nt_cli_chal, pwd->nt_cli_chal_len);
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("ntv2_owf_passwd: "));
    dump_data(20, pwd->smb_nt_owf, pwd->nt_owf_len);
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("nt_sess_pwd: "));
    dump_data(20, pwd->smb_nt_pwd, sizeof(pwd->smb_nt_pwd));
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("lmv2_owf_passwd: "));
    dump_data(20, pwd->smb_lm_owf, sizeof(pwd->smb_lm_owf));
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("lm_sess_pwd: "));
    dump_data(20, pwd->smb_lm_pwd, sizeof(pwd->smb_lm_pwd));
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("session key:\n"));
    dump_data(20, sess_key, 16);
#endif
    pwd->crypted = True;
}
/*
 * r p c _ _ n t l m a u t h _ f r e e _ i n f o
 *
 * Release an NTLM auth-info block: frees the server principal name, the
 * GSS name/credential handles, then the block itself, and NULLs the
 * caller's pointer. Safe to call with NULL / already-freed info.
 */
INTERNAL void rpc__ntlmauth_free_info
(
    rpc_auth_info_p_t *info
)
{
    rpc_ntlmauth_info_p_t ntlmauth_info = NULL;
    unsigned32 st = 0;
    OM_uint32 minor_status = 0;
    const char *info_type = NULL;

    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE,
        ("(rpc__ntlmauth_free_info)\n"));

    if (info == NULL || *info == NULL)
    {
        return;
    }

    ntlmauth_info = (rpc_ntlmauth_info_p_t)(*info);

    /* BUGFIX: capture the handle type string BEFORE the block is wiped and
     * freed.  The original code read ntlmauth_info->auth_info.is_server in
     * the trailing trace, after memset() and RPC_MEM_FREE() — a
     * use-after-free read. */
    info_type = ntlmauth_info->auth_info.is_server ? "server" : "client";

    if (ntlmauth_info->auth_info.server_princ_name)
    {
        rpc_string_free(&ntlmauth_info->auth_info.server_princ_name, &st);
    }
    if (ntlmauth_info->gss_server_name)
    {
        gss_release_name(&minor_status, &ntlmauth_info->gss_server_name);
    }
    if (ntlmauth_info->gss_creds)
    {
        gss_release_cred(&minor_status, &ntlmauth_info->gss_creds);
    }

    /* Scrub the block before releasing it. */
    memset(ntlmauth_info, 0, sizeof(*ntlmauth_info));
    RPC_MEM_FREE(ntlmauth_info, RPC_C_MEM_NTLMAUTH_INFO);
    rpc_g_ntlmauth_free_count++;

    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_GENERAL,
        ("(rpc__ntlmauth_free_info) freeing %s auth_info (now %d active).\n",
         info_type,
         rpc_g_ntlmauth_alloc_count - rpc_g_ntlmauth_free_count));

    *info = NULL;
}
/*
 * r p c _ _ n t l m a u t h _ i n i t
 *
 * Register the ncacn (connection-oriented) protocol EPV for the NTLMSSP
 * authentication service and return the service's auth EPV / protocol
 * EPV table to the caller.  *st is rpc_s_ok on return.
 */
INTERNAL void rpc__ntlmauth_init
(
    rpc_auth_epv_p_t *epv,
    rpc_auth_rpc_prot_epv_tbl_t *rpc_prot_epv,
    unsigned32 *st
)
{
    unsigned32 prot_id;
    rpc_auth_rpc_prot_epv_t *prot_epv;

    /* BUGFIX: the trace previously misidentified this routine as
     * "(rpc__ntlmauth_negotiate_init)". */
    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE,
        ("(rpc__ntlmauth_init)\n"));

    /*
     * Initialize the RPC-protocol-specific EPVs for the RPC protocols
     * we work with (ncacn).
     */
    /* for now only ncacn, as that's what windows uses */
    prot_id = rpc__ntlmauth_cn_init (&prot_epv, st);
    if (*st == rpc_s_ok)
    {
        rpc_g_ntlmauth_rpc_prot_epv[prot_id] = prot_epv;
    }

    /*
     * Return information for this ntlmssp authentication service.
     */
    *epv = &rpc_g_ntlmauth_epv;
    *rpc_prot_epv = rpc_g_ntlmauth_rpc_prot_epv;

    *st = 0;
}
/*
 * r e l e a s e _ s c a l l _ f r o m _ s c t e
 *
 * Drop the SCTE's reference to the given scall: either the cached
 * (non-maybe) scall slot, or an entry on the SCTE's "maybe" chain.
 * The scall MUST be reachable from its scte — the trailing assert
 * fires if it was on neither.
 */
INTERNAL void release_scall_from_scte
(
    rpc_dg_scall_p_t scall
)
{
    if (scall->scte->scall == scall)
    {
        /* Common case: this is the SCTE's cached scall. */
        RPC_DG_SCALL_RELEASE_NO_UNLOCK(&scall->scte->scall);
        RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
            "(release_scall_from_scte) released cached scall\n"));
        return;
    }
    else
    {
        /*
         * Need to check the maybe chain.
         */
        rpc_dg_scall_p_t curr, prev = NULL;

        for (curr = scall->scte->maybe_chain;
             curr != NULL;
             prev = curr, curr = (rpc_dg_scall_p_t) curr->c.next)
        {
            if (curr == scall)
            {
                /*
                 * First, remove the scall from the maybe chain
                 */
                if (prev == NULL)
                    scall->scte->maybe_chain = (rpc_dg_scall_p_t) curr->c.next;
                else
                    prev->c.next = curr->c.next;

                RPC_DG_SCALL_RELEASE_NO_UNLOCK(&curr);
                RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
                    "(release_scall_from_scte) released maybe scall\n"));
                return;
            }
        }
    }

    /*
     * Shouldn't ever get here...
     * (If we do, the condition below is necessarily false — see the early
     * return above — so this assert always fires in DEBUG builds.)
     */
    assert(scall->scte->scall == scall);
}
/*
 * r p c _ _ n t l m a u t h _ s r v _ r e g _ a u t h
 *
 * Server-side auth registration entry point for NTLMSSP.  Nothing to
 * register for this mechanism — all parameters are ignored and the call
 * always succeeds.
 */
INTERNAL void rpc__ntlmauth_srv_reg_auth
(
    unsigned_char_p_t server_name ATTRIBUTE_UNUSED,
    rpc_auth_key_retrieval_fn_t get_key_func ATTRIBUTE_UNUSED,
    pointer_t arg ATTRIBUTE_UNUSED,
    unsigned32 *stp
)
{
    *stp = rpc_s_ok;

    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE,
        ("(rpc__ntlmauth_srv_reg_auth)\n"));
}
/*
 * r p c _ _ n o a u t h _ d g _ w a y _ h a n d l e r
 *
 * WAY (who-are-you) callback handler for the dummy/noauth DG service.
 * Builds a (no-op) credentials message and copies it into the caller's
 * output buffer.  On any failure the message is freed and *stp carries
 * the error; *out_len is 0 unless a message was successfully copied.
 */
PRIVATE void rpc__noauth_dg_way_handler
(
    rpc_auth_info_p_t info,
    ndr_byte *in_data,
    signed32 in_len,
    signed32 out_max_len,
    ndr_byte *out_data,
    signed32 *out_len,
    unsigned32 *stp
)
{
    sec_krb_message message;
    error_status_t st;
    rpc_noauth_info_p_t noauth_info = (rpc_noauth_info_p_t)info;

    *out_len = 0;
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 2,
        ("(rpc__noauth_dg_way_handler) %x called back\n", info));

    /* A previously recorded failure poisons the handle permanently. */
    if (noauth_info->status != rpc_s_ok)
    {
        RPC_DBG_GPRINTF(("(rpc__noauth_dg_way_handler) handle was poisoned with %x\n",
            noauth_info->status));
        *stp = noauth_info->status;
        return;
    }

    message.data = 0;
    message.length = 0;

    /* Build the (empty, authn-level-none) credentials message. */
    st = sec_krb_dg_build_message (noauth_info->auth_info.u.auth_identity,
        0, 0,
        rpc_c_authn_level_none,
        noauth_info->auth_info.authz_protocol,
        0, 0, 0,
        &message);
    if (st != rpc_s_ok)
        goto out;

    /* NOTE(review): message.length (unsigned) vs out_max_len (signed32) —
     * comparison promotes per the usual arithmetic conversions; presumably
     * out_max_len is never negative here — confirm at callers. */
    if (message.length > out_max_len)
    {
        st = rpc_s_credentials_too_large;
        goto out;
    }

    memcpy(out_data, message.data, message.length);
    *out_len = message.length;
out:
    /* Always release the built message, success or failure. */
    sec_krb_message_free(&message);
    *stp = st;
    return;
}
/*
 * r p c _ _ m e m _ f r e e
 *
 * Instrumented wrapper around the inline free macro.
 */
PRIVATE void rpc__mem_free
(
    pointer_t addr,
    unsigned32 type
)
{
#ifdef DEBUG
    {
        /* Trace loudly (level 1) when this allocation type is the one being
         * watched via the mem_type debug switch; otherwise trace quietly. */
        int dbg_level =
            ((type & 0xff) == rpc_g_dbg_switches[rpc_es_dbg_mem_type]) ? 1 : 10;

        RPC_DBG_PRINTF(rpc_e_dbg_mem, dbg_level,
            ("(rpc__mem_free) type %x @ %x\n", type, addr));
    }
#endif

    RPC_MEM_FREE_IL(addr, type);
}
/*
 * r p c _ _ n t l m a u t h _ i n q _ m y _ p r i n c _ n a m e
 *
 * NTLM exposes no server principal name: when the caller supplied a
 * buffer, hand back the empty string; always succeed.
 */
INTERNAL void rpc__ntlmauth_inq_my_princ_name
(
    unsigned32 name_size,
    unsigned_char_p_t name,
    unsigned32 *stp
)
{
    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE,
        ("(rpc__ntlmauth_inq_my_princ_name)\n"));

    if (name_size != 0)
    {
        /* Empty principal name, NUL-terminated within the caller's buffer. */
        rpc__strncpy(name, (unsigned char *)"", name_size - 1);
    }

    *stp = rpc_s_ok;
}
/*
 * r p c _ _ n t l m a u t h _ i n i t _ f u n c
 *
 * Module init hook: registers the single NTLM (winnt) authentication
 * protocol entry with the runtime.
 */
void rpc__ntlmauth_init_func(void)
{
    static rpc_authn_protocol_id_elt_t auth[] =
    {
        {   /* 0 */
            rpc__ntlmauth_init,
            rpc_c_authn_winnt,
            dce_c_rpc_authn_protocol_winnt,
            NULL,
            rpc_g_ntlmauth_rpc_prot_epv
        }
    };

    /* BUGFIX: the trace previously misidentified this routine as
     * "(rpc__module_init_func)". */
    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE,
        ("(rpc__ntlmauth_init_func)\n"));

    rpc__register_authn_protocol(auth, 1);
}
/*
 * r p c _ _ d g _ c l i e n t _ f r e e
 *
 * Remove a client rep from the monitor's hash table and free it.
 * Silently does nothing if the handle is not found in its hash chain.
 * Takes and releases monitor_mutex around the whole operation.
 */
PRIVATE void rpc__dg_client_free
(
    rpc_client_handle_t client_h
)
{
    unsigned16 probe;
    rpc_dg_client_rep_p_t client = (rpc_dg_client_rep_p_t) client_h;
    rpc_dg_client_rep_p_t ptr, prev = NULL;

    RPC_MUTEX_LOCK(monitor_mutex);

    /*
     * Hash into the client rep table based on the client handle's UUID.
     *
     * NOTE(review): `st` is not declared in this function — presumably it
     * is a file-scope status variable (or consumed opaquely by the
     * CLIENT_HASH_PROBE macro); confirm against the full file.
     */
    probe = CLIENT_HASH_PROBE(&client->cas_uuid, &st);
    ptr = client_table[probe];

    /*
     * Scan down the hash chain, looking for the reference to the client
     * handle
     */
    while (ptr != NULL)
    {
        if (ptr == client)
        {
            /* Unlink from the chain (head or interior), then free. */
            if (prev == NULL)
                client_table[probe] = ptr->next;
            else
                prev->next = ptr->next;

            RPC_MEM_FREE(client, RPC_C_MEM_DG_CLIENT_REP);
            RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
                ("(client_free) Freeing client handle\n"));
            RPC_MUTEX_UNLOCK(monitor_mutex);
            return;
        }
        prev = ptr;
        ptr = ptr->next;
    }

    /* Not found: release the lock without freeing anything. */
    RPC_MUTEX_UNLOCK(monitor_mutex);
}
/*
 * f w d _ r e j e c t
 *
 * Reject a packet that could not be forwarded: send a REJECT error body
 * back to the sender — unless the packet was a broadcast, which is never
 * answered.
 */
INTERNAL void fwd_reject
(
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe
)
{
    /* BUGFIX: the trace previously misidentified this routine as
     * "(fwd_forward)". */
    RPC_DBG_PRINTF(rpc_e_dbg_general, 10,
        ("(fwd_reject) rejecting (ptype=%s) [%s]\n",
        rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(rqe->hdrp)),
        rpc__dg_act_seq_string(rqe->hdrp)));

    if (! RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_BROADCAST))
    {
        rpc__dg_xmit_error_body_pkt(
            sp->sock, (rpc_addr_p_t) &rqe->from, rqe->hdrp, RPC_C_DG_PT_REJECT,
            nca_s_unk_if);   /* !!! status could be better */
    }
}
/*
 * r p c _ _ n o a u t h _ d g _ w h o _ a r e _ y o u
 *
 * Issue a conv_who_are_you_auth callback to the client and decode the
 * returned credentials message into the noauth info block (client name,
 * PAC, creds, authn level, authz protocol).  On callback failure, *stp
 * carries the error and the noauth_info fields are left untouched.
 */
PRIVATE void rpc__noauth_dg_who_are_you
(
    rpc_auth_info_p_t info,
    handle_t h,
    idl_uuid_t *actuid,
    unsigned32 boot_time,
    unsigned32 *seq,
    idl_uuid_t *cas_uuid,
    unsigned32 *stp
)
{
    rpc_noauth_info_p_t noauth_info = (rpc_noauth_info_p_t)info;
    unsigned char inbuf[12];     /* XXX size */
    unsigned char outbuf[1000];  /* XXX size */
    unsigned_char_p_t server;
    signed32 outlen;
    sec_krb_message message;
    int st;

    /* XXX set up exception handler here around remote call? */
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 2,
        ("(rpc__noauth_dg_way) %x doing callback\n", info));

    /* do call — remote WAY-auth callback; outbuf receives the encoded
     * credentials, outlen its length. */
    (*conv_v3_0_c_epv.conv_who_are_you_auth)
        (h, actuid, boot_time, inbuf, 0, sizeof(outbuf),
         seq, cas_uuid, outbuf, &outlen, stp);

    st = *stp;
    if (st != rpc_s_ok)
    {
        RPC_DBG_GPRINTF(("(rpc__noauth_dg_way) conv_who_are_you_auth failed, st %x\n", st));
        return;
    }

    /* Decode the returned credentials message in-place (no copy). */
    message.data = outbuf;
    message.length = outlen;
    *stp = sec_krb_dg_decode_message (&message, 0,
        &noauth_info->client_name,
        &noauth_info->client_pac,
        &noauth_info->client_creds, /* FAKE-EPAC */
        &server,
        &noauth_info->auth_info.authn_level,
        &noauth_info->auth_info.authz_protocol,
        0, 0, 0, 0);
}
/*
 * r p c _ _ n t l m a u t h _ m g t _ i n q _ d e f
 *
 * Report the default authentication level for the NTLM service
 * (connect-level).  Fails with rpc_s_invalid_arg on a NULL output
 * pointer.
 */
INTERNAL void rpc__ntlmauth_mgt_inq_def
(
    unsigned32 *authn_level,
    unsigned32 *stp
)
{
    RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE,
        ("(rpc__ntlmauth_mgt_inq_def)\n"));

    if (authn_level != NULL)
    {
        *authn_level = rpc_c_authn_level_connect;
        *stp = rpc_s_ok;
    }
    else
    {
        *stp = rpc_s_invalid_arg;
    }
}
/*
 * r p c _ _ n o a u t h _ d g _ c r e a t e
 *
 * Allocate and initialize a dummy (noauth) DG auth-info block.
 * Returns the new block as a generic rpc_auth_info_p_t with a single
 * reference; *stp is set to 0.
 */
PRIVATE rpc_auth_info_p_t rpc__noauth_dg_create
(
    unsigned32 *stp
)
{
    rpc_noauth_info_p_t noauth_info;

    RPC_MEM_ALLOC(noauth_info, rpc_noauth_info_p_t, sizeof(*noauth_info),
                  RPC_C_MEM_UTIL, RPC_C_MEM_WAITOK);
    rpc_g_noauth_alloc_count++;

    RPC_DBG_PRINTF(rpc_e_dbg_auth, 1,
        ("(rpc__noauth_dg_create) %x created (now %d active)\n",
         noauth_info, rpc_g_noauth_alloc_count - rpc_g_noauth_free_count));

    /* Zero everything, then set the fields that need non-zero values. */
    memset(noauth_info, '\0', sizeof(*noauth_info));
    RPC_MUTEX_INIT(noauth_info->lock);

    noauth_info->creds_valid = 0;
    noauth_info->level_valid = 0;
    noauth_info->client_valid = 0;

    /*
     * fill in the common auth_info stuff: a server-side handle with the
     * dummy authn protocol and name-based authz.
     */
    noauth_info->auth_info.refcount = 1;
    noauth_info->auth_info.server_princ_name = 0;
    noauth_info->auth_info.authn_level = -1;
    noauth_info->auth_info.authn_protocol = rpc_c_authn_dce_dummy;
    noauth_info->auth_info.authz_protocol = rpc_c_authz_name;
    noauth_info->auth_info.is_server = 1;
    noauth_info->auth_info.u.s.privs = 0;
    noauth_info->auth_info.u.s.creds = 0;   /* FAKE-EPAC */

    /* XXX do other initialization here. */

    *stp = 0;
    return (rpc_auth_info_p_t) noauth_info;
}
/****************************************************************************
 makes lm and nt OWF crypts

 NTLMv1 variant: computes the 24-byte LM and NT responses from the stored
 password hashes and the server's 8-byte challenge, then derives the
 session key.  A null password short-circuits with nt_owf_len = 0.
 ****************************************************************************/
void pwd_make_lm_nt_owf(struct pwd_info *pwd, uchar cryptkey[8], uchar sess_key[16])
{
    if (pwd->null_pwd)
    {
#ifdef DEBUG_PASSWORD
        RPC_DBG_PRINTF(rpc_e_dbg_auth, 20,
            ("pwd_make_lm_nt_owf: NULL password\n"));
#endif
        pwd->nt_owf_len = 0;
        return;
    }

    /* generate 24-byte hashes — LM and NT responses over the challenge */
    SMBOWFencrypt(pwd->smb_lm_pwd, cryptkey, pwd->smb_lm_owf);
    SMBOWFencrypt(pwd->smb_nt_pwd, cryptkey, pwd->smb_nt_owf);
    pwd->nt_owf_len = 24;

    /* Session key is derived from the NT hash and NT response. */
    SMBsesskeygen_ntv1(pwd->smb_nt_pwd, pwd->smb_nt_owf, sess_key);

#ifdef DEBUG_PASSWORD
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("client cryptkey: "));
    dump_data(20, cryptkey, 8);
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("nt_owf_passwd: "));
    dump_data(20, pwd->smb_nt_owf, pwd->nt_owf_len);
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("nt_sess_pwd: "));
    dump_data(20, pwd->smb_nt_pwd, sizeof(pwd->smb_nt_pwd));
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("lm_owf_passwd: "));
    dump_data(20, pwd->smb_lm_owf, sizeof(pwd->smb_lm_owf));
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("lm_sess_pwd: "));
    dump_data(20, pwd->smb_lm_pwd, sizeof(pwd->smb_lm_pwd));
    RPC_DBG_PRINTF(rpc_e_dbg_auth, 20, ("session key:\n"));
    dump_data(20, sess_key, 16);
#endif
    pwd->crypted = True;
}
/****************************************************************************
 gets lm and nt crypts

 Copy the previously computed LM and NT one-way responses out of the
 pwd_info block.  For a null password, only *nt_owf_len (when supplied)
 is set, to 0, and the output buffers are left untouched.
 ****************************************************************************/
void pwd_get_lm_nt_owf(struct pwd_info *pwd, uchar lm_owf[24],
                uchar * nt_owf, size_t * nt_owf_len)
{
    if (pwd->null_pwd)
    {
#ifdef DEBUG_PASSWORD
        RPC_DBG_PRINTF(rpc_e_dbg_auth, 20,
            ("pwd_get_lm_nt_owf: NULL password\n"));
#endif
        if (nt_owf_len != NULL)
        {
            *nt_owf_len = 0;
        }
        return;
    }

    /* Hand back the stored responses; NT response length is variable. */
    memcpy_zero(lm_owf, pwd->smb_lm_owf, 24);
    memcpy_zero(nt_owf, pwd->smb_nt_owf, pwd->nt_owf_len);

    if (nt_owf_len == NULL)
    {
        return;
    }
    *nt_owf_len = pwd->nt_owf_len;
}
/*
 * r p c _ _ n t l m a u t h _ f r e e _ i n f o
 *
 * Release an NTLM auth-info block: deletes its mutex, frees the server
 * principal name, poisons the storage (0x69 fill) and frees it, then
 * NULLs the caller's pointer.
 */
PRIVATE void rpc__ntlmauth_free_info
(
    rpc_auth_info_p_t *info
)
{
    rpc_ntlmauth_info_p_t ntlmauth_info;
    char *info_type;
    unsigned32 tst;

    /* BUGFIX: guard against a NULL pointer / already-released info, for
     * consistency with the INTERNAL rpc__ntlmauth_free_info variant; the
     * original dereferenced *info unconditionally. */
    if (info == NULL || *info == NULL)
    {
        return;
    }

    ntlmauth_info = (rpc_ntlmauth_info_p_t)*info;
    /* Capture the handle type now — the block is wiped and freed below. */
    info_type = (*info)->is_server ? "server" : "client";

    RPC_MUTEX_DELETE(ntlmauth_info->lock);

    if ((*info)->server_princ_name)
        rpc_string_free (&(*info)->server_princ_name, &tst);
    (*info)->u.s.privs = 0;
    // sec_id_pac_util_free (&ntlmauth_info->client_pac);

    /* Poison the freed block to catch stale references early. */
    memset (ntlmauth_info, 0x69, sizeof(*ntlmauth_info));
    RPC_MEM_FREE (ntlmauth_info, RPC_C_MEM_UTIL);
    rpc_g_ntlmauth_free_count++;

    RPC_DBG_PRINTF(rpc_e_dbg_auth, 1, (
        "(rpc__ntlmauth_release) freeing %s auth_info (now %d active).\n",
        info_type,
        rpc_g_ntlmauth_alloc_count - rpc_g_ntlmauth_free_count));

    *info = NULL;
}
/*
 * f w d _ f o r w a r d
 *
 * Forward a received packet to a local server (fwd_addr): prepend a
 * forwarding (fpkt) header carrying the original sender's address, mark
 * the raw header FORWARDED, and transmit.  If the combined packet would
 * exceed the initial max packet size, send it as two pieces (address
 * first, then the original packet) using the FORWARDED_2 flag protocol.
 * Consumes fwd_addr (freed on exit).
 *
 * NOTE(review): `fhdr` is not declared in this function — presumably a
 * file-scope rpc_dg_fpkt_hdr_t; confirm against the full file.
 */
INTERNAL void fwd_forward
(
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe,
    rpc_addr_p_t fwd_addr
)
{
    rpc_dg_pkt_hdr_p_t hdrp = rqe->hdrp;          /* possibly byte-swapped view */
    rpc_dg_raw_pkt_hdr_p_t rhdrp = &rqe->pkt->hdr; /* original on-wire header */
    rpc_socket_iovec_t iov[3];
    boolean b;
    unsigned32 st;

    RPC_DBG_PRINTF(rpc_e_dbg_general, 10,
        ("(fwd_forward) forwarding (ptype=%s) [%s]\n",
        rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(hdrp)),
        rpc__dg_act_seq_string(hdrp)));

#ifndef MISPACKED_HDR
    /*
     * Create the fpkt hdr — the original sender's sockaddr plus the drep
     * needed to interpret the original header.
     */
    fhdr.len = rqe->from.len;
    /*b_c_o_p_y((byte_p_t) &rqe->from.sa, (byte_p_t) &fhdr.addr, fhdr.len);*/
    memmove((byte_p_t)&fhdr.addr, (byte_p_t)&rqe->from.sa, fhdr.len) ;
    fhdr.drep[0] = hdrp->drep[0];
    fhdr.drep[1] = hdrp->drep[1];
    fhdr.drep[2] = hdrp->drep[2];
    fhdr.drep[3] = 0;

    /*
     * Set up the 1st two components of the forwarded pkt.  Note that
     * the header we're sending out is the original (raw) one we received
     * (i.e., not the potentially byte-swapped one).  Authentication
     * checksumming requires that we do this.  (The checksum is produced
     * on the original header and the forwardee must do the same.)  The
     * forwardee will byte swap again (on a copy, like we did) if it
     * has too.
     *
     * Note that on MISPACKED_HDR systems we're counting on the fact
     * that rpc__dg_xmit_pkt will recognize (based on iov[0].len) that
     * the header must NOT be compressed.
     */
    iov[0].iov_base = (byte_p_t) rhdrp;
    iov[0].iov_len = RPC_C_DG_RAW_PKT_HDR_SIZE;
    iov[1].iov_base = (byte_p_t) &fhdr;
    iov[1].iov_len = sizeof(rpc_dg_fpkt_hdr_t);

    /*
     * Note that the original pkt may already be the max size, forcing
     * us to forward the pkt in two pieces.  We use the
     * "initial_max_pkt_size" for comparison so that a 2.0 rpcd can work
     * correctly with 1.5.1 servers (that are bound to a 1.5.1 libnck).
     * N.B. don't confuse all this with packet fragmentation and
     * reassembly, which is network-visible, not a intra-machine hack like
     * this is.
     *
     * Note that the current form (size) of the fpkt header's forwarding
     * address (a sockaddr) limits our ability to perform forwarding
     * for "large" address transports.  There are several schemes that
     * we can use in the future (once we have to deal with a larger address
     * transport) to work around this deficiency.  We could (a) decide
     * to use a pkt header flag to tag new format fwd pkts, (b) just
     * use more space (a new format) for for those transports that require
     * it and let recv_pkt() deduce the format knowing the transport,
     * (c) make clients using such transports not use this forwarding
     * mechanism (i.e. clients would call ep_map() directly at the start
     * of a call).  This latter scheme may become necessary once the
     * address exceeds some threashold since there's only so much space
     * in a pkt once and the address gets too large, this forwarding
     * scheme becomes undesireable, there won't be much (any) room left
     * for data.
     *
     * Mark the pkt as forwarded and send it (them).  Note that we're
     * setting bits in the original packet header (via "rhdrp"), NOT
     * the potentially byte-swapped header (via "hdrp").
     */
    rhdrp->hdr[RPC_C_DG_RPHO_FLAGS] |= RPC_C_DG_PF_FORWARDED;

    if (rqe->pkt_len + sizeof(rpc_dg_fpkt_hdr_t) <= RPC_C_DG_INITIAL_MAX_PKT_SIZE)
    {
        /* Single-piece forward: raw hdr + fpkt hdr + original body. */
        iov[2].iov_base = ((byte_p_t) rqe->pkt) + RPC_C_DG_RAW_PKT_HDR_SIZE;
        iov[2].iov_len = rqe->pkt_len - RPC_C_DG_RAW_PKT_HDR_SIZE;
        rpc__dg_xmit_pkt(sp->sock, fwd_addr, iov, 3, &b);
    }
    else
    {
        /* Save the original length bytes so they can be restored for the
         * second piece. */
        unsigned8 orig_len_b0 = rhdrp->hdr[RPC_C_DG_RPHO_LEN];
        unsigned8 orig_len_b1 = rhdrp->hdr[RPC_C_DG_RPHO_LEN + 1];

        /*
         * The first piece just contains the sender's address
         */
        rhdrp->hdr[RPC_C_DG_RPHO_FLAGS2] |= RPC_C_DG_PF2_FORWARDED_2;
        rhdrp->hdr[RPC_C_DG_RPHO_LEN] = 0;
        rhdrp->hdr[RPC_C_DG_RPHO_LEN + 1] = 0;

        rpc__dg_xmit_pkt(sp->sock, fwd_addr, iov, 2, &b);

        if (b)
        {
            /*
             * The second piece just contains the original pkt.
             * Restore the header's flags and length before sending.
             */
            rhdrp->hdr[RPC_C_DG_RPHO_FLAGS] &= ~RPC_C_DG_PF_FORWARDED;
            rhdrp->hdr[RPC_C_DG_RPHO_FLAGS2] &= ~RPC_C_DG_PF2_FORWARDED_2;
            rhdrp->hdr[RPC_C_DG_RPHO_LEN] = orig_len_b0;
            rhdrp->hdr[RPC_C_DG_RPHO_LEN + 1] = orig_len_b1;

            iov[1].iov_base = ((byte_p_t) rqe->pkt) + RPC_C_DG_RAW_PKT_HDR_SIZE;
            iov[1].iov_len = rqe->pkt_len - RPC_C_DG_RAW_PKT_HDR_SIZE;
            rpc__dg_xmit_pkt(sp->sock, fwd_addr, iov, 2, &b);
        }
    }
#else
#error "No code for MISPACKED_HDR!"
#endif

    rpc__naf_addr_free(&fwd_addr, &st);
}
/*
 * r p c _ _ d g _ s t a t s _ p r i n t
 *
 * Dump the global DG protocol statistics (call/packet counters plus a
 * per-packet-type sent/received breakdown) via the stats debug channel.
 */
PRIVATE void rpc__dg_stats_print(void)
{
    unsigned16 i;

    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("RPC DG Protocol Statistics\n") );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("--------------------------------------------------------\n") );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Calls sent: %9lu\n", rpc_g_dg_stats.calls_sent) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Calls rcvd: %9lu\n", rpc_g_dg_stats.calls_rcvd) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Pkts sent: %9lu\n", rpc_g_dg_stats.pkts_sent) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Pkts rcvd: %9lu\n", rpc_g_dg_stats.pkts_rcvd) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Broadcasts sent: %9lu\n", rpc_g_dg_stats.brds_sent) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Dups sent: %9lu\n", rpc_g_dg_stats.dups_sent) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Dups rcvd: %9lu\n", rpc_g_dg_stats.dups_rcvd) );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("Out of orders rcvd: %9lu\n", rpc_g_dg_stats.oo_rcvd) );

    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("\nBreakdown by packet type sent rcvd\n") );
    RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
        ("------------------------------------------------------------------\n") );

    /* One line per DG packet type (0 .. RPC_C_DG_PT_MAX_TYPE inclusive). */
    for (i = 0; i <= RPC_C_DG_PT_MAX_TYPE; i++)
    {
        RPC_DBG_PRINTF(rpc_e_dbg_stats, 1,
            ("(%02u) %-10s %9lu %9lu\n",
            i, rpc__dg_pkt_name(i),
            rpc_g_dg_stats.pstats[i].sent,
            rpc_g_dg_stats.pstats[i].rcvd) );
    }
}
/*
 * r p c _ _ d g _ s c a l l _ r e i n i t
 *
 * Re-initialize a cached server call handle (scall) for a new incoming
 * call described by the received packet (rqe) on socket sp: refresh the
 * client address / socket references and their cached transport limits,
 * copy the per-call fields out of the packet header, reset the call
 * state machine to "init", and (for maybe-semantics calls) reposition
 * the scall from the SCTE's cached slot onto its maybe chain.
 *
 * Caller must hold the global lock and the scall's call lock.
 */
PRIVATE void rpc__dg_scall_reinit
(
    rpc_dg_scall_p_t scall,
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe
)
{
    rpc_dg_pkt_hdr_p_t hdrp = rqe->hdrp;
    unsigned32 st;
    boolean maybe = RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_MAYBE);

    RPC_LOCK_ASSERT(0);
    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * Re-initialize the common call handle fields
     */
    RPC_DG_CALL_REINIT(&scall->c);

    /* Fresh call: accept and queue cancels, none pending or counted. */
    scall->c.c.u.server.cancel.accepting = true;
    scall->c.c.u.server.cancel.queuing = true;
    scall->c.c.u.server.cancel.had_pending = false;
    scall->c.c.u.server.cancel.count = 0;

    /*
     * Typically, subsequent calls on a given actid will be for the same
     * naf and network address and received over the same server socket
     * from the same client socket (netaddr/endpoint), but alas, we can't
     * count on that...
     */

    /*
     * Detect naf changes and reinit cached naf-specific info.
     *
     * The max_frag_size is really associated with the
     * more specific "network address / interface" than just the naf
     * (actually they're really dependent on the even lower level of
     * path through the network even if the peer address don't change).
     * However, since the runtime currently manages these as constant
     * for a particular naf (mostly due to to the inability of system
     * APIs and/or network transports to provide this dynamic information),
     * we really only have to reset them if the naf changed (the significance
     * of this is a "different netaddr" check would be more costly).
     */
    if (scall->c.addr == NULL
        || rqe->from.rpc_protseq_id != scall->c.addr->rpc_protseq_id)
    {
        /*
         * Update to the current client address.
         */
        rpc__naf_addr_overcopy((rpc_addr_p_t) &rqe->from, &scall->c.addr, &st);

        /*
         * Initialize the max_frag_size field for the conversation with this
         * client.
         */
        RPC_DG_CALL_SET_MAX_FRAG_SIZE(&scall->c, &st);
        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
            ("(rpc__dg_scall_reinit) Set max fs %u\n",
            scall->c.xq.max_frag_size));
    }
    else
    {
        /*
         * Update to the (typically unchanged) current client address.
         * (Only its endpoint may change.)
         */
        rpc__naf_addr_overcopy((rpc_addr_p_t) &rqe->from, &scall->c.addr, &st);
    }

    /*
     * Detect received socket changes and reinit cached socket specific info
     * (the scall may not yet have a cached sock ref or it may be different
     * from the current one).
     */
    if (scall->c.sock_ref != sp)
    {
        if (scall->c.sock_ref != NULL)
            rpc__dg_network_sock_release(&scall->c.sock_ref);

        /*
         * This reference update is a little tricky.  We need to be sure
         * that the socket is not closed before we get a chance to record
         * our reference.  We can do this safely because we are the
         * listener thread, and and we know that the listener thread
         * has a reference to the socket.  If the socket had failed,
         * and we had closed it, we wouldn't be here right now.
         */
        scall->c.sock_ref = sp;
        rpc__dg_network_sock_reference(sp);

        /*
         * Initialize the max_rcv_tsdu and max_snd_tsdu fields
         * for the conversation with this client, clamped to the socket's
         * actual receive/send buffer sizes.
         */
        rpc__naf_inq_max_tsdu(scall->c.addr->rpc_protseq_id,
                              &scall->c.xq.max_rcv_tsdu, &st);
        scall->c.xq.max_snd_tsdu = scall->c.xq.max_rcv_tsdu;
        scall->c.xq.max_rcv_tsdu = MIN(scall->c.xq.max_rcv_tsdu,
                                       scall->c.sock_ref->rcvbuf);
        scall->c.xq.max_snd_tsdu = MIN(scall->c.xq.max_snd_tsdu,
                                       scall->c.sock_ref->sndbuf);

        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
            ("(rpc__dg_scall_reinit) Set rcv tsdu %u, snd tsdu %u\n",
            scall->c.xq.max_rcv_tsdu, scall->c.xq.max_snd_tsdu));

        /*
         * Reinit cached socket-specific information.
         */
        RPC_DG_RBUF_SIZE_TO_WINDOW_SIZE(sp->rcvbuf,
                                        sp->is_private,
                                        scall->c.xq.max_frag_size,
                                        scall->c.rq.window_size);
        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
            ("(rpc__dg_scall_reinit) Set ws %u, rcvbuf %u, max fs %u\n",
            scall->c.rq.window_size, sp->rcvbuf, scall->c.xq.max_frag_size));
    }

    if (scall->c.is_cbk && scall->cbk_ccall != NULL)
    {
        /*
         * This is essentially a turnaround.  The client, which is waiting
         * for a response, becomes the receiver.
         *
         * We inherit high_rcv_frag_size and snd_frag_size from the original
         * ccall.
         *
         * Note: If this is the initial allocation of the callback scall,
         * is_cbk is still false.  rpc__dg_scall_cbk_alloc() will handle that
         * case.
         */
        scall->c.rq.high_rcv_frag_size =
            scall->cbk_ccall->c.rq.high_rcv_frag_size;
        scall->c.xq.snd_frag_size = scall->cbk_ccall->c.xq.snd_frag_size;

        /*
         * Also we inherit the reservation from the original ccall, which
         * gives us enough packets for receiving fragments.
         */
        scall->c.n_resvs = scall->cbk_ccall->c.n_resvs;
    }

    RPC_DBG_PRINTF(rpc_e_dbg_xmit, 6,
        ("(rpc__dg_scall_reinit) Set snd fs %lu, high rcv fs %lu\n",
        scall->c.xq.snd_frag_size, scall->c.rq.high_rcv_frag_size));

    /*
     * Re-initialize the fields of the common call handle header that
     * are really part of the prototype packet header.
     */
    scall->c.call_seq = hdrp->seq;
    scall->c.high_seq = hdrp->seq;
    scall->c.call_if_id = hdrp->if_id;
    scall->c.call_if_vers = hdrp->if_vers;
    scall->c.call_ihint = hdrp->ihint;
    scall->c.call_opnum = hdrp->opnum;
    scall->c.call_object = hdrp->object;

    /*
     * Re-initialize some remaining fields in the prototype packet header.
     * Note: the ptype may not currently be "response" due to the way
     * we handle fault pkts.
     */
    scall->c.xq.base_flags = 0;
    scall->c.xq.base_flags2 = 0;
    scall->c.xq.hdr.flags = 0;
    scall->c.xq.hdr.flags2 = 0;

    RPC_DG_HDR_SET_PTYPE(&scall->c.xq.hdr, RPC_C_DG_PT_RESPONSE);

    /*
     * Reset the call state to the initial state.
     */
    RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_init);

    scall->call_is_setup = false;
    scall->has_call_executor_ref = false;
    scall->call_is_queued = false;
    scall->client_needs_sboot = false;  /* Really "unknown" */
    scall->c.com_timeout_knob = rpc_mgmt_inq_server_com_timeout();

    /*
     * If the new call uses maybe semantics, and this scall is already
     * associated with an SCTE, then we may need to reposition this scall
     * within the SCTE.
     */
    if (maybe && scall->scte != NULL && scall->scte->scall == scall)
    {
        rpc_dg_sct_elt_p_t scte = scall->scte;

        RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
            "(rpc__dg_scall_reinit) using cached scall for maybe call\n"));

        /* Move the scall from the SCTE's cached slot to the head of its
         * maybe chain. */
        scall->c.next = (rpc_dg_call_p_t) scte->maybe_chain;
        scte->maybe_chain = scall;
        scte->scall = NULL;
    }
}
/*
 * s c a l l _ u n c a c h e
 *
 * Tear down a cached scall: dissociate it from its paired callback
 * ccall (both the client-side is_cbk case and the server-side case),
 * from its SCTE, and from its server binding handle; stop its timer and
 * drop the final reference.  Returns false (doing nothing) if the
 * global lock or the paired ccall's lock cannot be acquired without
 * violating the locking hierarchy — the caller is expected to retry.
 *
 * Caller holds the scall's call lock; the scall must be idle or
 * orphaned.
 */
INTERNAL boolean32 scall_uncache
(
    rpc_dg_scall_p_t scall
)
{
    unsigned32 st;
    boolean b;

    /* Try-lock the global lock: we hold the call lock, which is below the
     * global lock in the hierarchy, so we must not block here. */
    RPC_TRY_LOCK(&b);
    if (! b)
    {
        RPC_DBG_GPRINTF(("(scall_uncache) couldn't get global lock\n"));
        return false;
    }

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    assert(scall->c.state == rpc_e_dg_cs_idle
        || scall->c.state == rpc_e_dg_cs_orphan);

    if (scall->c.is_cbk)
    {
        /*
         * This is a *client side* callback scall; dissociate from our
         * cbk_ccall if necessary.
         */
        if (scall->cbk_ccall != NULL)
        {
            rpc_dg_ccall_p_t ccall = scall->cbk_ccall;

            assert(ccall->cbk_scall == scall);

            /*
             * Acquire the callback ccall lock.  Note the locking hierarchy
             * for this type of call handle pairing is: cbk_ccall, is_cbk scall
             * (see dg.h).
             */
            RPC_DG_CALL_TRY_LOCK(&ccall->c, &b);
            if (! b)
            {
                RPC_DBG_GPRINTF(
                    ("(scall_uncache) couldn't get cbk_scall->cbk_ccall lock\n"));
                RPC_UNLOCK(0);
                return false;
            }

            /* Break the mutual references between the pair. */
            ccall->cbk_start = false;
            RPC_DG_CCALL_RELEASE(&scall->cbk_ccall);
            RPC_DG_SCALL_RELEASE_NO_UNLOCK(&ccall->cbk_scall);
        }
    }
    else
    {
        /*
         * This is a normal (server side) scall.
         */

        /*
         * If this server side scall has been part of a callback back
         * to the client, free up the cached *server side* callback ccall
         * resources.
         */
        if (scall->cbk_ccall != NULL)
        {
            rpc_dg_ccall_p_t ccall = scall->cbk_ccall;

            assert(ccall->cbk_scall == scall);

            /*
             * Acquire the callback ccall lock.  Note the locking hierarchy
             * for this type of call handle pairing is: scall, is_cbk ccall
             * (see dg.h).
             */
            RPC_DG_CALL_LOCK(&ccall->c);

            rpc__dg_ccall_free_prep(ccall);

            /*
             * Release the reference the CCALL has to its originating SCALL.
             */
            RPC_DG_SCALL_RELEASE_NO_UNLOCK(&ccall->cbk_scall);

            /*
             * Release the reference the SCALL has to the CCALL it used for
             * the callback.  Then call free_handle, which will stop the
             * timer and release the client binding handles reference to
             * the CCALL.
             */
            RPC_DG_CCALL_RELEASE(&scall->cbk_ccall);
            RPC_BINDING_RELEASE((rpc_binding_rep_p_t *) &ccall->h, &st);
        }

        /*
         * Dissociate the scall from its scte if necessary.  Presumably,
         * the only time that the scall won't have a scte is if the call
         * had been orphaned, though we don't count on that.
         */
        if (scall->scte != NULL)
        {
            release_scall_from_scte(scall);

            /*
             * Release the SCALL's reference to the SCTE.
             */
            RPC_DG_SCT_RELEASE(&scall->scte);
        }
    }

    /*
     * Common scall uncache processing.
     */
    RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
        ("(scall_uncache) Freeing cached SCALL [%s]\n",
        rpc__dg_act_seq_string(&scall->c.xq.hdr)));

    /*
     * Dissociate the scall from the server binding handle if necessary.
     */
    if (scall->h != NULL)
    {
        RPC_DG_SCALL_RELEASE_NO_UNLOCK(&scall->h->scall);
        RPC_BINDING_RELEASE((rpc_binding_rep_p_t *) &scall->h, &st);
    }

    /*
     * Stop the scall's timer and dissociate it from the scall.
     */
    rpc__timer_clear(&scall->c.timer);

    RPC_DG_SCALL_RELEASE(&scall);

    RPC_UNLOCK(0);

    return true;
}
/*
 * r p c _ _ d g _ e x e c u t e _ c a l l
 *
 * Call-executor entry point for a datagram (DG) server call.  Takes a
 * call *reference* handed off by the caller (released on exit), runs the
 * RPC prologue (packet reservation, binding handle setup, first-packet
 * receive, auth setup, WAY validation), dispatches to the server stub,
 * queues/pushes the response or a reject, and performs end-of-call state
 * transitioning.
 *
 * scall_          — the server call handle (as dce_pointer_t), locked on entry.
 * call_was_queued — unused here (ATTRIBUTE_UNUSED).
 */
PRIVATE void rpc__dg_execute_call
(
    dce_pointer_t scall_,
    boolean32 call_was_queued ATTRIBUTE_UNUSED
)
{
    ndr_format_t drep;
    unsigned32 st, reject_st;
    boolean broadcast;
    boolean idem = false;
    boolean maybe;
    boolean sent_response;
    boolean called_stub;
    rpc_dg_scall_p_t scall = (rpc_dg_scall_p_t) scall_;
    rpc_dg_pkt_hdr_p_t hdrp;
    rpc_iovector_elt_t iove;
    rpc_dg_recvq_elt_p_t rqe;
    unsigned16 ihint;
    rpc_dg_binding_server_p_t h;
    rpc_v2_server_stub_epv_t ss_epv;
    rpc_mgr_epv_t mgr_epv;
    rpc_if_rep_p_t ifspec;
    idl_uuid_t type;
    int force_way_auth;
    rpc_key_info_p_t key_info;
    rpc_dg_auth_epv_p_t auth_epv;
    unsigned16 opnum;
    unsigned32 flags;
    unsigned32 max_calls;
    unsigned32 max_rpc_size;
    rpc_if_callback_fn_t if_callback;
    int prev_cancel_state;

    /*
     * All of this code (99% of which is never executed) is in the fast path.
     *
     * NOTE: This routine is responsible for sending back a correct
     * cancel pending status to the client under all conditions
     * (to ensure that cancels don't get lost - i.e. forwarded to the
     * server, accepted, not delivered and then not reported as
     * a cancel pending).
     *
     * Any "reject response" to the client must be robust (at least for
     * Non-Idempotent calls).  This is necessary because the client may
     * have already received a fack causing it to free some pkts that it
     * would need to "rerun" the call (assuming the stub was never entered)
     * in the event that a reject was lost.
     *
     * Client's recover from lost responses to idempotent calls (including
     * proper cancel pending resetting) so we don't have to worry about
     * being robust in this situation.
     */

    /*
     * The caller of this routine is responsible for handing off a
     * call *reference* to us.  We will release our reference when
     * we're done.
     */
    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * We are now executing.
     */
    scall->call_is_queued = false;

    /*
     * Initialize the iove, since in any failure case (i.e. orphan),
     * it may not be updated correctly; subsequent logic depends on freeing
     * things based on the proper state of the iove.
     */
    iove.buff_dealloc = NULL;

    /*
     * Initialize the "called_stub" flag to false.  If a call gets
     * rejected, and never enters the stub routine, it's up to us to
     * free the request RQE.
     */
    called_stub = false;

    /*
     * Initialize the "sent call response" flag to indicate a failure.
     * This is necessary so that failures resulting in END_OF_CALL
     * end up transitioning to the proper call state when we wrap-up
     * call processing (at the end of this routine).
     */
    sent_response = false;

    /*
     * Before continuing, it's likely that the call has been "opened
     * up" (due to a unlock/lock around call executor handoff) and we
     * need to check if it is safe to continue...
     */
    if (scall->c.state != rpc_e_dg_cs_recv)
        goto END_OF_CALL;

    /*
     * If this call does not yet have a reservation, make one now.  Any
     * call that was queued will not have a reservation; also, even if
     * a executor thread was initially available for the call, there
     * might not have been any reservations available at that time.
     * (Note that the call to make the reservation may block until a
     * reservation becomes available.)
     *
     * The make_reservation routine requires that the global lock be
     * held.  To respect the locking heirarchy, we need to juggle the
     * locks around a little, checking that the state of the call doesn't
     * change during the times when it's unlocked.
     */
    if (scall->c.n_resvs < scall->c.max_resvs)
    {
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_LOCK(0);
        RPC_DG_CALL_LOCK(&scall->c);

        /* Re-check: call was unlocked while acquiring the global lock. */
        if (scall->c.state != rpc_e_dg_cs_recv)
        {
            RPC_UNLOCK(0);
            goto END_OF_CALL;
        }

        /*
         * We always start with the maximum reservation because we no longer
         * reset high_rcv_frag_size and snd_frag_size between the calls.
         * (The previous call may have used/advertised the larger fragment
         * size.)
         *
         * This is fine in the user space since the packet rationing will
         * never happen.  (We assume that there are always enough packet
         * buffers available.)
         *
         * This may accelerate the packet rationing in the kernel, though
         * (iff MBF is turned on).  Unfortunately, we can't start with the
         * minimum reservation in the kernel because the other end may be a
         * user space.
         */
        rpc__dg_pkt_adjust_reservation(&scall->c, scall->c.max_resvs, true);

        RPC_UNLOCK(0);

        /*
         * Since the call's been opened up, we need to check its status.
         */
        if (scall->c.state != rpc_e_dg_cs_recv)
        {
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) Cancelled while awaiting pkt reservation\n"));
            goto END_OF_CALL;
        }

        /*
         * Since this call did not have a reservation, any data received for
         * it was dropped, and the client was told not to send any more.
         * Since the call can now receive data, prod the client into
         * retransmitting.
         */
        rpc__dg_call_xmit_fack(&scall->c, NULL, ! scall->c.rq.recving_frags);
    }

    /*
     * Now's as good a time as any to enable direct cancel posting to
     * the thread (while we've got the call lock held).
     *
     * NOTE: This routine MUST call rpc_cthread_cancel_caf() before
     * returning (regardless of the return path)!  Cancels may become
     * pending at any time and must be flushed (otherwise subsequent
     * calls using this thread will inherit this call's cancel).
     */
    rpc__cthread_cancel_enable_post(&scall->c.c);

    /*
     * Create a server binding handle, if we don't already have one hanging
     * off the scall.  If we have a cached one, reinit it.
     */
    if (scall->h != NULL)
    {
        h = scall->h;
        RPC_DG_BINDING_SERVER_REINIT(h);
    }
    else
    {
        rpc_addr_p_t addr;

        rpc__naf_addr_copy(scall->c.addr, &addr, &st);

        h = (rpc_dg_binding_server_p_t) rpc__binding_alloc
            (true, &scall->c.call_object, RPC_C_PROTOCOL_ID_NCADG, addr, &st);
        if (st != rpc_s_ok)
        {
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) Can't allocate binding, st = 0x%x\n", st));
            goto END_OF_CALL;
        }

        RPC_DG_CALL_REFERENCE(&scall->c);
        h->scall = scall;

        /* Propagate any auth info from the SCTE onto the new handle. */
        if (!scall->c.is_cbk)
        {
            key_info = scall->scte->key_info;
            if (key_info != NULL)
            {
                rpc_auth_info_p_t auth_info = key_info->auth_info;
                h->c.c.auth_info = auth_info;
                RPC_DG_AUTH_REFERENCE(auth_info); /* for the handle */
            }
        }

        scall->h = h;
    }

    assert(RPC_DG_CALL_IS_SERVER(&scall->c));

    /*
     * Dequeue the first pkt off of the receive queue (including it's hdr).
     *
     * WARNING: we MUST use comm_receive_int() because comm_receive(),
     * while it would do the locking for us, doesn't return a useable iove
     * for 0 length data.
     *
     * WARNING 2: this call verifies the authenticity of the packet it
     * reads *except* when the packet is from an unknown activity (non-zero
     * authn_proto noticed later) or when the check fails with
     * "rpc_s_dg_need_way_auth" (receive still "succeeds").  In either
     * case a way_auth callback is made and the check retried further down.
     */
    rpc__dg_call_receive_int(&scall->c, &iove, &st);
    force_way_auth = false;
    if (st == rpc_s_dg_need_way_auth)
    {
        RPC_DBG_PRINTF(rpc_e_dbg_general, 4,
            ("(rpc__dg_execute_call) will force way callback\n"));
        st = rpc_s_ok;
        /*
         * We don't own the rqe.  It's still on recvq.
         */
        force_way_auth = true;
    }
    else if (st != rpc_s_ok)
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) Receive failed st = 0x%x\n", st));
        goto END_OF_CALL;
    }

    rqe = RPC_DG_RECVQ_ELT_FROM_IOVECTOR_ELT(&iove);
    assert(rqe != NULL && rqe->hdrp != NULL);
    hdrp = rqe->hdrp;

    idem      = ((hdrp->flags & RPC_C_DG_PF_IDEMPOTENT) != 0);
    broadcast = ((hdrp->flags & RPC_C_DG_PF_BROADCAST) != 0);
    maybe     = ((hdrp->flags & RPC_C_DG_PF_MAYBE) != 0);

    if (scall->c.is_cbk)
    {
        RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
            ("(rpc__dg_execute_call) Callback [%s]\n",
            rpc__dg_act_seq_string(hdrp)));
    }

    /*
     * Perform some of the request pkt verification that was defered.
     * This includes interface id and operation number.
     */
    if (!scall->c.is_cbk)
        key_info = scall->scte->key_info;
    else
        key_info = NULL;

    /*
     * Does the request specify authentication, do we not have auth info
     * yet, is the call not "maybe", and is this not a callback (!!!
     * for the callback case)?  If so, then get the auth info now.
     */
    if (hdrp->auth_proto != 0 && key_info == NULL && ! maybe && ! scall->c.is_cbk)
    {
        rpc_authn_protocol_id_t authn_protocol;
        rpc_auth_info_p_t auth_info;

        assert(scall->c.key_info == NULL);

        /*
         * Get the appropiate DG auth EPV.  We need to convert the wire
         * auth protocol ID into the corresponding API value and then
         * get the EPV using that latter value.
         */
        authn_protocol = rpc__auth_cvt_id_wire_to_api(hdrp->auth_proto, &st);
        if (st != rpc_s_ok)
        {
            reject_st = rpc_s_unknown_reject;
            goto AFTER_CALL_TO_STUB;
        }
        auth_epv = (rpc_dg_auth_epv_p_t) rpc__auth_rpc_prot_epv
            (authn_protocol, RPC_C_PROTOCOL_ID_NCADG);
        if (auth_epv == NULL)
        {
            reject_st = rpc_s_unknown_reject;
            goto AFTER_CALL_TO_STUB;
        }

        /*
         * Call into auth service to create an auth info.
         *
         * This generates an auth_info and a key_info.  The auth_info
         * gets attached to the handle, while the key_info gets
         * attached to the scte and scall.
         */
        key_info = (*auth_epv->create) (&st);
        if (st != rpc_s_ok)
        {
            reject_st = rpc_s_unknown_reject;
            goto AFTER_CALL_TO_STUB;
        }
        scall->c.key_info = key_info;
        scall->c.auth_epv = auth_epv;
        /* we have one reference to the key_info already. */
        scall->scte->key_info = key_info;
        scall->scte->auth_epv = auth_epv;
        RPC_DG_KEY_REFERENCE(key_info);  /* for the scte */

        /* fill in the auth_info in the handle */
        auth_info = key_info->auth_info;
        h->c.c.auth_info = auth_info;
        RPC_DG_AUTH_REFERENCE(auth_info);  /* for the handle */
    }
    auth_epv = scall->c.auth_epv;

    /*
     * If the interface isn't valid, send a rejection.
     */
    rpc_object_inq_type(&scall->c.call_object, &type, &st);
    if (! (st == rpc_s_ok || st == rpc_s_object_not_found))
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) rpc_object_inq_type failed, st=0x%x [%s]\n",
            st, rpc__dg_act_seq_string(hdrp)));
        reject_st = st;
        goto AFTER_CALL_TO_STUB;
    }

    ihint = hdrp->ihint;
    rpc__if_lookup2 (&hdrp->if_id, hdrp->if_vers, &type,
                     &ihint, &ifspec, &ss_epv, &mgr_epv,
                     &flags, &max_calls, &max_rpc_size, &if_callback, &st);
    if (st != rpc_s_ok)
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) rpc__if_lookup failed, st=0x%x [%s]\n",
            st, rpc__dg_act_seq_string(hdrp)));
        reject_st = st;
        goto AFTER_CALL_TO_STUB;
    }

    /*
     * The interface is valid, update the call ihint so we tell the client.
     */
    scall->c.call_ihint = ihint;

    /*
     * Extract a copy of the opnum from the packet header, and check to see
     * that it's appropriate for this interface.
     */
    opnum = hdrp->opnum;
    if (opnum >= ifspec->opcnt)
    {
        RPC_DBG_GPRINTF((
            "(rpc__dg_execute_call) Opnum (%u) out of range [%s]\n",
            opnum, rpc__dg_act_seq_string(hdrp)));
        reject_st = rpc_s_op_rng_error;
        goto AFTER_CALL_TO_STUB;
    }

    /*
     * To guarantee at-most-once semantics for non-idempotent RPCs, we
     * must ensure that the call is filtered based on a WAY validated
     * sequence number.  If we don't have such a sequence number, then
     * call back to client to get one (the returned WAY validated seq
     * must match this RPC's seq - i.e. it must be the RPC that the client
     * is currently performing).  Note that we may do a way_auth
     * callback even when we wouldn't otherwise do it because the
     * underlying authentication layers decided one was needed.
     *
     * The analogous processing for non-idempotent callbacks (from a
     * server manager to the client originating the call, who needs to
     * validate the callback's seq) was previously taken care of in the
     * do_request() processing (a WAY validated logical scte high_seq
     * was already known).
     *
     * Note also that maybe calls with large-INs are tagged as
     * non-idempotent but do not need to be protected against re-runs.
     * (The architecture specifies that maybe calls can *not* have
     * at-most-once semantics, but the implementation finds it more
     * convenient to use the non-idempotent code paths for handling
     * calls with large-INs.)  For this reason, avoid doing a WAY for
     * maybe calls (the client may not even be still running!).
     *
     * Release and reacquire the call lock while performing this
     * (slow path / lengthy) WAY and Auth processing.
     *
     * We perform the WAY RPC with general cancel delivery disabled.
     * The RPC prologue is suppose to be transparent and clients can
     * orphan the call if they get tired of waiting around.
     */
    if (! maybe
        && (force_way_auth || key_info != NULL || (! idem && ! scall->c.is_cbk)))
    {
        if (!force_way_auth && RPC_DG_SCT_IS_WAY_VALIDATED(scall->scte))
        {
            /*
             * Better safe than sorry regarding at-most-once semantics:
             * the connection may have become WAY validated *after* this
             * call passed its initial filtering.
             */
            if (scall->c.call_seq != scall->scte->high_seq)
            {
                RPC_DBG_PRINTF(rpc_e_dbg_general, 2,
                    ("(execute_call) Old sequence, previous=%u [%s]\n",
                    scall->scte->high_seq, rpc__dg_act_seq_string(hdrp)));
                goto END_OF_CALL;
            }
        }
        else
        {
            boolean high_seq_was_way_validated =
                (boolean)(scall->scte->high_seq_is_way_validated);

            /*
             * WAY validate the connection and ensure that this call
             * is the current call.  Unlock the scall while performing
             * the WAY validation.
             */
            rpc_dg_sct_elt_p_t scte;

            RPC_DG_CALL_UNLOCK(&scall->c);

            /*
             * The WAY validation routine must be called with the
             * connection unlocked.  Due to locking hierarchy and the
             * fact that we unlocked the scall, we've opened up a
             * window... check if it's safe to continue.
             */
            RPC_LOCK(0);
            RPC_DG_CALL_LOCK(&scall->c);
            if (scall->c.state != rpc_e_dg_cs_recv)
            {
                RPC_UNLOCK(0);
                goto END_OF_CALL;
            }
            scte = scall->scte;
            RPC_DG_CALL_UNLOCK(&scall->c);

            rpc__dg_sct_way_validate(scte, force_way_auth, &st);

            RPC_UNLOCK(0);

            RPC_DG_CALL_LOCK(&scall->c);

            /*
             * Before continuing, we've "opened up" the call (due to
             * the unlock/lock) and we need to check if it is safe to
             * continue...
             */
            if (scall->c.state != rpc_e_dg_cs_recv)
                goto END_OF_CALL;

            if (st != rpc_s_ok)
            {
                reject_st = rpc_s_who_are_you_failed;
                goto AFTER_CALL_TO_STUB;
            }
            else
            {
                if (scall->c.call_seq != scall->scte->high_seq)
                {
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 2,
                        ("(rpc__dg_execute_call) Old sequence, previous=%u [%s]\n",
                        scall->scte->high_seq, rpc__dg_act_seq_string(hdrp)));
                    goto END_OF_CALL;
                }
            }

            /*
             * If high_seq_was_way_validated, rpc__dg_call_receive_int()
             * has already verified the packet by calling
             * (*auth_epv->recv_ck)().
             * It's ok to call it again here except when using
             * pkt_privacy where the packet body is already decrypted.
             * For consistency, we don't verify the packet if it's
             * already done.
             */
            if (key_info != NULL && !force_way_auth && !high_seq_was_way_validated)
            {
                unsigned32 blocksize = auth_epv->blocksize;
                char *cksum;
                int raw_bodysize;

                /*
                 * This must be a single buffer fragment.
                 * The very first fragment!
                 */
                if (rqe->hdrp == NULL || rqe->frag_len != rqe->pkt_len)
                {
                    reject_st = rpc_s_who_are_you_failed;
                    goto AFTER_CALL_TO_STUB;
                }

                /*
                 * It's not really necessary to round up the packet body
                 * length here because the sender includes the length of
                 * padding before the auth trailer in the packet body
                 * length.  However, that may be a wrong behavior and we
                 * shouldn't rely on it.
                 */
                raw_bodysize = ((rqe->hdrp->len + blocksize - 1)
                                / blocksize) * blocksize;

                /*
                 * Now that we have obtained authentication credentials,
                 * go back and verify that cksum is entirely contained
                 * inside the packet, and the auth_type is what we
                 * expected.  This "shouldn't fail" unless someone's
                 * playing games with us.
                 */
                if (((RPC_C_DG_RAW_PKT_HDR_SIZE + raw_bodysize +
                      auth_epv->overhead) > rqe->frag_len)
                    || (rqe->hdrp->auth_proto != auth_epv->auth_proto))
                {
                    st = nca_s_proto_error;
                }
                else
                {
                    /*
                     * Adjust the packet buffer's pkt_len,
                     * i.e., excluding the auth trailer.
                     * Also adjust data_len in the iovector.
                     */
                    rqe->pkt_len = raw_bodysize + RPC_C_DG_RAW_PKT_HDR_SIZE;
                    iove.data_len = raw_bodysize;
                    cksum = rqe->pkt->body.args + raw_bodysize;
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 4,
                        ("(rpc__dg_execute_call) calling recv_ck now\n"));
                    (*auth_epv->recv_ck) (key_info, rqe, cksum, &st);
                }
                if (st != rpc_s_ok)
                {
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 2,
                        ("(rpc__dg_execute_call) pkt didn't verify -- %x\n", st));
                    reject_st = rpc_s_who_are_you_failed;
                    goto AFTER_CALL_TO_STUB;
                }
            }
            else if (key_info != NULL && force_way_auth)
            {
                /*
                 * Call rpc__dg_call_receive_int() again.  This time,
                 * (*auth_epv->recv_ck)() is supposed to succeed.
                 */
                rpc__dg_call_receive_int(&scall->c, &iove, &st);
                force_way_auth = false;
                if (st == rpc_s_dg_need_way_auth)
                {
                    /*
                     * We still don't own the rqe...
                     */
                    force_way_auth = true;
                }
                if (st != rpc_s_ok)
                {
                    RPC_DBG_GPRINTF((
                        "(rpc__dg_execute_call) Receive failed st = 0x%x after forced WAY auth callback\n",
                        st));
                    reject_st = rpc_s_who_are_you_failed;
                    goto AFTER_CALL_TO_STUB;
                }
                assert(rqe == RPC_DG_RECVQ_ELT_FROM_IOVECTOR_ELT(&iove));
            }
        }
    }
    assert(force_way_auth == false);

    /*
     * If we get here, we're accepting the call and we're gonna dispatch
     * to the server stub!  Setup the required args for the dispatch
     * (the iove was done above) and run call the server stub.
     */
    RPC_DG_HDR_INQ_DREP(&drep, hdrp);

    /*
     * The packet rationing code needs to know that we no longer need
     * to worry about doing WAYs.
     */
    scall->c.rq.is_way_validated = true;

    /*
     * Unlock the call lock while in the stub.
     */
    RPC_DG_CALL_UNLOCK(&scall->c);

    /*
     * Note: the stubs are absolutely, positively required to free the
     * provided iove described buffer (assuming the len > 0), even if
     * the stub detects and returns an error condition.  Set the
     * "called_stub" flag to true so that we know we don't have to worry
     * about freeing the RQE ourselves.
     */
    called_stub = true;

    /*
     * As required by the packet rationing rules, if the I/O vector element
     * has no data, free it up now because the server stub doesn't bother
     * to free such elements.  Note that we needed the element until
     * now for the info that was in its packet header.
     */
    if (iove.data_len == 0 && iove.buff_dealloc != NULL)
        RPC_FREE_IOVE_BUFFER(&iove);

    switch (ifspec->stub_rtl_if_vers)
    {
        /*
         * If this is an old v0 or v1 stub runtime interface.  Do the
         * dirty work out of line.
         */
        case RPC_C_STUB_RTL_IF_VERS_NCS_1_0:
        case RPC_C_STUB_RTL_IF_VERS_NCS_1_5:
            if (rpc_g_dg_pre_v2_server_call_p == NULL)
            {
                /*
                 * rpc_m_pre_v2_ss
                 * "(%s) Can't handle pre-v2 server stubs"
                 */
                rpc_dce_svc_printf (
                    __FILE__, __LINE__, "%s",
                    rpc_svc_server_call,
                    svc_c_sev_fatal | svc_c_action_abort,
                    rpc_m_pre_v2_ss,
                    "rpc__dg_execute_call" );
            }

            prev_cancel_state = dcethread_enableinterrupt_throw(0);
            (*rpc_g_dg_pre_v2_server_call_p)(
                ifspec,
                opnum,
                (handle_t) h,
                (rpc_call_handle_t) scall,
                &iove,
                drep,
                ss_epv,
                mgr_epv,
                &reject_st);
            dcethread_enableinterrupt_throw(prev_cancel_state);
            break;

        /*
         * This is the v2 (new) stub runtime interface.
         */
        case RPC_C_STUB_RTL_IF_VERS_DCE_1_0:
            prev_cancel_state = dcethread_enableinterrupt_throw(0);
            (*(ss_epv[opnum]))(
                (handle_t) h,
                (rpc_call_handle_t) scall,
                &iove,
                &drep,
                &ndr_g_transfer_syntax,
                mgr_epv,
                &reject_st);
            dcethread_enableinterrupt_throw(prev_cancel_state);
            break;

        /*
         * Unknown version
         */
        default:
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) Unknown rtl/if version. 0x%x\n",
                ifspec->stub_rtl_if_vers));
            RPC_DG_CALL_LOCK(&scall->c);
            if (iove.buff_dealloc != NULL)
                RPC_FREE_IOVE_BUFFER(&iove);
            goto END_OF_CALL;
    }

    /*
     * While the stub may have returned due to call orphaning, this will
     * not typically be the case.  Even if it completed succesfully
     * we could become orphaned further down in this processing (e.g.
     * in xmitq_push).  Defer orphan checking and cleanup till we only
     * have to do it once; the extra work done if we are orphaned won't
     * kill us.
     */

    /*
     * Acquire the call lock since we need it for several pieces of
     * processing from here on in.
     *
     * Before continuing, we've "opened up" the call (due to the
     * unlock/lock) and we need to check if it is safe to continue...
     */
    RPC_DG_CALL_LOCK(&scall->c);

    if (scall->c.state != rpc_e_dg_cs_recv
        && scall->c.state != rpc_e_dg_cs_xmit)
    {
        goto END_OF_CALL;
    }

    /*
     * Error cases detected before we get to calling the stub and that want
     * to send a "reject" re-enter here.
     */
AFTER_CALL_TO_STUB:

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * If this was a broadcast request and we're either rejecting the call
     * or the call faulted, just skip to the end.
     */
    if (broadcast &&
        (reject_st != rpc_s_ok
         || RPC_DG_HDR_INQ_PTYPE(&scall->c.xq.hdr) == RPC_C_DG_PT_FAULT))
    {
        goto END_OF_CALL;
    }

    /*
     * The stub was obligated to call the iove's dealloc routine,
     * so we don't have to free that.  We don't need the recvq anymore.
     * In normal cases, the list will already be empty, so having this
     * in the fast path doesn't hurt and (in the error cases) it frees
     * up resources while we potentially wait in xmitq_push() (or
     * awaiting a xqe for a reject or no [outs] response).
     */
    if (scall->c.rq.head != NULL)
        rpc__dg_recvq_free(&scall->c.rq);

    /*
     * If a reject condition exists, prepare the reject response.
     * Otherwise, handle the case where the stub has no [outs] and it's
     * not a maybe call; we still need to generate a response pkt.
     *
     * We depend on both of these response queuing operations
     * to only queue the response and not send it since we've yet
     * setup the return cancel_pending status for the client.
     */
    if (reject_st != rpc_s_ok)
    {
        /*
         * If the reject path caused us to jump over the call into the
         * stub, we need to free the request RQE here.
         *
         * If we were forced to do WAY auth and haven't done it yet, don't
         * free it because we don't own the rqe.
         */
        if (! called_stub && !force_way_auth && iove.buff_dealloc != NULL)
            RPC_FREE_IOVE_BUFFER(&iove);

        queue_mapped_reject(scall, reject_st);
    }
    else
    {
        if (scall->c.state == rpc_e_dg_cs_recv && !maybe)
        {
            rpc_iovector_t xmit_data;

            xmit_data.num_elt = 0;
            rpc__dg_call_transmit_int(&scall->c, &xmit_data, &st);
            /*
             * The transmit may fail because the call is already orphaned.
             * It may fail for some other reason as well.  In either case,
             * we're not gonna get a response to the client.  Just keep
             * falling through (other calls may fail as well) and clean up.
             */
        }
    }

    /*
     * At this point, we can stop accepting forwarded cancels.  Determine
     * the cancel pending disposition of the call and set the call's
     * xq cancel_pending flag accordingly so that the response (or at
     * least the last pkt of the response) gets sent with the proper
     * state.  This is the single point where the "send response"
     * path ensures that it has flushed any pending cancels from the
     * call executor thread; this includes cancels generated by
     * a received cancel-request or a cancel induced by orphan call
     * processing.
     *
     * We could have stopped accepting cancels as soon as the stub
     * returned, but we really wanted to wait till here before setting
     * up the return cancel_pending status.  After this, we shouldn't
     * screw around anymore with the xq (i.e. re-initing it).  There
     * should be a reject, fault or normal response queued up and
     * it should go out with the correct cancel_pending flag.
     * That is of course, unless that call has been orphaned, in which
     * case no further response of any kind will be sent to the client
     * (setting the cancel_pending flag will not affect the client;
     * which is a *requirement* under this condition).
     */
    if (rpc__cthread_cancel_caf(&scall->c.c))
    {
        RPC_DBG_PRINTF(rpc_e_dbg_cancel, 5,
            ("(rpc__dg_execute_call) setting cancel_pending\n"));
        scall->c.xq.base_flags2 |= RPC_C_DG_PF2_CANCEL_PENDING;
    }

    /*
     * Assuming that the call isn't already orphaned, finally push
     * out the remainder of the response.  The push may fail
     * because orphaning occurs during the push or for some
     * other reason; just continue to cleanup processing.  Indicate
     * whether or not the response was sent so we can determine
     * the appropriate call state when we're done.
     */
    if (scall->c.state != rpc_e_dg_cs_orphan)
    {
        rpc__dg_call_xmitq_push(&scall->c, &st);
        if (st == rpc_s_ok)
            sent_response = true;
        else
            RPC_DBG_GPRINTF((
                "(rpc__dg_execute_call) xmitq_push returns 0x%x\n", st));
    }

    /*
     * Error cases that want to skip the reply-sending machinery re-enter
     * here.
     */
END_OF_CALL:

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * End of the fast path.
     *
     * Any response has been sent (or at least all the pkts have been
     * sent once).  Perform final call wrap-up processing / state
     * transitioning.  In the event that we didn't take the send
     * response path, we still need to flush any pending cancels.
     * In the event that we took the send response path but the response
     * wasn't succesfully sent, we'll call the following twice but
     * that's ok.
     */
    if (! sent_response)
        (void) rpc__cthread_cancel_caf(&scall->c.c);

    /*
     * If the call is not "idempotent" we must defer complete end of
     * call processing until the client's ack is received.  (Note: "maybe"
     * and "broadcast" are tagged as "idempotent".)  For idempotent calls
     * with small outs, we can clean up right now (if the client never
     * gets the response, it can rerun the call).
     *
     * Idempotent calls with large outs are treated similarly to
     * non-idempotent calls.  We retain the outs until "acknowledged"
     * by the client or the retransmit logic gives up.  This is required
     * to prevent the undesireable situation of the client receiving
     * a "nocall" in response to a "ping" after the client has already
     * received some of the outs.
     *
     * If we didn't (seemingly) successfully send a response, skip the
     * final state (this covers orphan processing as well).  Furthermore,
     * if the call has been orphaned stay in that state.
     *
     * An orphaned call has already been disassociated from its SCTE
     * (ccall in the case of a cbk_scall) and there should be a maximum
     * of two references to the orphaned SCALL; the call executor's and
     * the timer thread.  The only actions required are to release any
     * remaining resources held by the call and release one reference
     * to the SCALL (the timer thread will eventually complete to job
     * of destroying the scall).
     */
    if ((! idem || RPC_DG_FLAG_IS_SET(scall->c.xq.base_flags, RPC_C_DG_PF_FRAG))
        && sent_response)
    {
        RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_final);
    }
    else
    {
        /*
         * It's really the end of the call, so we can free the xmitq.
         */
        if (scall->c.xq.head != NULL)
            rpc__dg_xmitq_free(&scall->c.xq, &scall->c);

        /*
         * Typically, the call goes back to the idle state, ready to
         * handle the next call.  First, If this was a callback, update
         * the callback sequence number in the associated client callback
         * handle.
         *
         * If the call was orphaned, we can't to do either of the above
         * (we just want to let the scall's timer complete the job of
         * destroying the scall).
         */
        if (scall->c.state != rpc_e_dg_cs_orphan)
        {
            if (scall->c.is_cbk)
            {
                scall->cbk_ccall->c.high_seq = scall->c.call_seq;
            }

            RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_idle);
        }
    }

    /*
     * Give up the packet reservation for this call.
     */
    rpc__dg_pkt_cancel_reservation(&scall->c);

    if (scall->c.is_cbk && scall->cbk_ccall != NULL)
    {
        /*
         * Update the original ccall's high_rcv_frag_size and snd_frag_size.
         */
        scall->cbk_ccall->c.rq.high_rcv_frag_size =
            scall->c.rq.high_rcv_frag_size;
        scall->cbk_ccall->c.xq.snd_frag_size = scall->c.xq.snd_frag_size;
    }

    /*
     * We're now done with our scall lock/reference.
     */
    scall->has_call_executor_ref = false;
    RPC_DG_SCALL_RELEASE(&scall);
}
/*
 * r p c _ _ d g _ s c a l l _ a l l o c
 *
 * Allocate and initialize a server call handle (SCALL) for a normal
 * (non-callback) call, cross-link it with its server connection table
 * entry (SCTE), and start the SCALL's timer.
 *
 * scte — server connection table entry this call belongs to (referenced).
 * sp   — socket pool element the request arrived on.
 * rqe  — first request packet; its header flags decide "maybe" handling.
 *
 * Returns the new, LOCKED scall (lock taken inside scall_init).
 * Caller must hold the global lock (RPC_LOCK_ASSERT(0)).
 */
PRIVATE rpc_dg_scall_p_t rpc__dg_scall_alloc
(
    rpc_dg_sct_elt_p_t scte,
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe
)
{
    rpc_dg_scall_p_t scall;
    unsigned32 st ATTRIBUTE_UNUSED;
    /* Timer period for the scall's timer callback: one second. */
    static rpc_clock_t rpc_c_dg_scall_timer_freq_init = RPC_CLOCK_SEC(1);
    boolean maybe = RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_MAYBE);

    RPC_LOCK_ASSERT(0);

    RPC_MEM_ALLOC(scall, rpc_dg_scall_p_t, sizeof *scall,
        RPC_C_MEM_DG_SCALL, RPC_C_MEM_NOWAIT);

    /*
     * Initialize the common SCALL handle fields (and LOCK the SCALL)
     */
    scall_init(scall, sp, rqe);

    /*
     * The rest is specific to normal (non-callback) SCALLs.
     */
    scall->cbk_ccall = NULL;
    scall->c.actid_hash = rpc__dg_uuid_hash(&scte->actid);

    /*
     * Initialize the server specific call handle fields
     */

    /*
     * Setup the SCTE / SCALL cross linkage.
     */
    RPC_DG_SCT_REFERENCE(scte);
    scall->scte = scte;

    RPC_DG_CALL_REFERENCE(&scall->c);

    /*
     * A "maybe" call does not become the SCTE's current call; it is
     * chained off the SCTE's maybe_chain instead.
     */
    if (! maybe)
        scte->scall = scall;
    else
    {
        RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
            "(rpc__dg_scall_alloc) putting call on maybe chain\n"));
        scall->c.next = (rpc_dg_call_p_t) scte->maybe_chain;
        scte->maybe_chain = scall;
    }

    /*
     * Initialize the fields of the common call handle header that
     * are really part of the prototype packet header.
     */
    scall->c.call_actid = scte->actid;
    scall->c.call_ahint = scte->ahint;
    scall->c.is_cbk = false;

    /*
     * Copy over authentication/keying information (taking a key
     * reference for the scall's copy).
     */
    scall->c.auth_epv = scte->auth_epv;
    scall->c.key_info = scte->key_info;
    if (scall->c.key_info != NULL)
        RPC_DG_KEY_REFERENCE(scall->c.key_info);

    RPC_DG_CALL_SET_TIMER(&scall->c, rpc__dg_scall_timer,
        rpc_c_dg_scall_timer_freq_init);

    return(scall);
}
/*
 * r p c _ _ c n _ c a l l _ e x e c u t o r
 *
 * Call-executor thread body for a connection-oriented (CN) server call.
 * Resolves the object type, looks up the interface, receives the first
 * fragment, dispatches to the server stub, and on any failure rejects
 * the call with the relevant status.  Always ends the call and releases
 * the binding via the CLEANUP path.
 *
 * arg             — actually an rpc_cn_call_rep_t *, locked on entry
 *                   (unlocked here).
 * call_was_queued — used only in the debug trace (ATTRIBUTE_UNUSED).
 *
 * Fixes vs. previous revision:
 *  - the receive-failure path now rejects with the status returned by
 *    rpc__cn_call_receive instead of a copy-pasted rpc_s_op_rng_error;
 *  - the unknown-stub-version fatal message now uses rpc_m_unk_ifspec
 *    (matching its own comment) instead of rpc_m_pre_v2_ifspec;
 *  - removed a dead binding_r assignment that was immediately
 *    overwritten at CLEANUP.
 */
PRIVATE void rpc__cn_call_executor
(
    pointer_t arg,
    boolean32 call_was_queued ATTRIBUTE_UNUSED
)
{
    rpc_binding_rep_t *binding_r;
    rpc_cn_call_rep_t *call_r;
    rpc_iovector_t iovector;
    dce_uuid_t type_uuid;
    rpc_mgr_epv_t manager_epv;
    rpc_v2_server_stub_epv_t server_stub_epv;
    rpc_if_rep_p_t if_spec_rep;
    unsigned32 flags;
    unsigned32 max_calls;
    unsigned32 max_rpc_size;
    rpc_if_callback_fn_t if_callback;
    unsigned32 status;

    RPC_LOG_CN_CTHD_NTR;
    RPC_DBG_PRINTF (rpc_e_dbg_general, RPC_C_CN_DBG_GENERAL,
        ("CN: call_rep->%x call executor running ... %s queued\n",
        arg, (call_was_queued ? "WAS" : "WAS NOT")));

    /*
     * The arg passed in is really a call rep.
     */
    call_r = (rpc_cn_call_rep_t *) arg;

    /*
     * Release the call rep lock which was acquired for us in the
     * common code.
     */
    RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);

    /*
     * If there is an object uuid, see if there's a type uuid
     * associated with it.
     */
    rpc_object_inq_type (&call_r->binding_rep->obj, &type_uuid, &status);
    if ((status != rpc_s_object_not_found) && (status != rpc_s_ok))
    {
        /* Flush pending cancels, then reject with the lookup status. */
        RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
        rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r);
        RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);
        RPC_CN_LOCK ();
        rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status);
        RPC_CN_UNLOCK ();
        goto CLEANUP;
    }

    /*
     * Get the if rep and the server stub and manager EPV.
     */
    rpc__if_lookup2 (call_r->u.server.if_id,
                     call_r->u.server.if_vers,
                     &type_uuid,
                     &call_r->u.server.ihint,
                     &if_spec_rep,
                     &server_stub_epv,
                     &manager_epv,
                     &flags,
                     &max_calls,
                     &max_rpc_size,
                     &if_callback,
                     &status);
    if (status != rpc_s_ok)
    {
        RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
        rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r);
        RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);
        RPC_CN_LOCK ();
        rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status);
        RPC_CN_UNLOCK ();
        goto CLEANUP;
    }

    /*
     * If the operation number is out of range, indicate a fault to
     * the protocol service, otherwise process the incoming packet(s).
     */
    if (call_r->opnum >= if_spec_rep->opcnt)
    {
        RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
        rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r);
        RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);
        RPC_CN_LOCK ();
        rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, rpc_s_op_rng_error);
        RPC_CN_UNLOCK ();
        goto CLEANUP;
    }

    /*
     * Receive the first packet.
     */
    rpc__cn_call_receive ((rpc_call_rep_t *) call_r, &iovector.elt[0], &status);
    if (status != rpc_s_ok)
    {
        RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
        rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r);
        RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);
        RPC_CN_LOCK ();
        /*
         * BUGFIX: propagate the actual receive status; this previously
         * rejected with rpc_s_op_rng_error copied from the branch above.
         */
        rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status);
        RPC_CN_UNLOCK ();
        goto CLEANUP;
    }

    /*
     * Mark the call as having executed.
     */
    call_r->call_executed = true;

    /*
     * Enable posting of cancels to this call executor thread.
     * This will also post any queued cancels.
     */
    RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
        ("(rpc__cn_call_executor) call_rep->%x enabling posting of cancels and posting any queued cancels\n",
        call_r));
    RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
    rpc__cthread_cancel_enable_post ((rpc_call_rep_p_t) call_r);
    RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);

    /*
     * Dispatch appropriately depending on the stub version.
     */
    switch (if_spec_rep->stub_rtl_if_vers)
    {
        /*
         * If this is an old v0 or v1 stub runtime interface
         * then do the dirty work out of line.
         */
        case 0:
        case 1:
            /*
             * rpc_m_pre_v2_ifspec
             * "(%s) Pre-v2 interface spec"
             */
            RPC_DCE_SVC_PRINTF ((
                DCE_SVC(RPC__SVC_HANDLE, "%s"),
                rpc_svc_server_call,
                svc_c_sev_fatal | svc_c_action_abort,
                rpc_m_pre_v2_ifspec,
                "rpc__cn_call_executor" ));
            break;

        /*
         * This is the v2 (new) stub runtime interface.
         */
        case 2:
            RPC_LOG_SERVER_STUB_PRE;
            ((*server_stub_epv[call_r->opnum]))
                ((handle_t) call_r->binding_rep,
                (rpc_call_handle_t) call_r,
                &iovector.elt[0],
                &(RPC_CN_ASSOC_NDR_FORMAT (call_r->assoc)),
                &call_r->transfer_syntax,
                manager_epv,
                &status);
            RPC_LOG_SERVER_STUB_POST;
            break;

        /*
         * Unknown version
         */
        default:
            /*
             * rpc_m_unk_ifspec
             * "(%s) Unknown interface spec version"
             *
             * BUGFIX: pass rpc_m_unk_ifspec here (the code previously
             * passed rpc_m_pre_v2_ifspec, contradicting this comment).
             */
            RPC_DCE_SVC_PRINTF ((
                DCE_SVC(RPC__SVC_HANDLE, "%s"),
                rpc_svc_server_call,
                svc_c_sev_fatal | svc_c_action_abort,
                rpc_m_unk_ifspec,
                "rpc__cn_call_executor" ));
            break;
    }

    /*
     * Check for an error while in the server stub but before the
     * manager routine was entered.
     */
    if (status != rpc_s_ok)
    {
        RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
        rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r);
        RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);
        RPC_CN_LOCK ();
        rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status);
        RPC_CN_UNLOCK ();
        goto CLEANUP;
    }

    /*
     * If the stub returned successfully, flush any pending cancels before
     * falling into the common end-of-call path.
     */
    RPC_CALL_LOCK ((rpc_call_rep_t *) call_r);
    rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r);
    RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r);

CLEANUP:
    /*
     * End the call and release the binding handle (binding_r is read
     * here, after rpc__cn_call_end invalidates call_r).
     */
    binding_r = (rpc_binding_rep_t *) call_r->binding_rep;
    rpc__cn_call_end ((rpc_call_rep_p_t *) &call_r, &status);

    RPC_LOCK (0);
    RPC_BINDING_RELEASE (&binding_r, &status);
    RPC_UNLOCK (0);

    RPC_LOG_CN_CTHD_XIT;
}
/*
 * r p c _ _ c n _ t r a n s m i t _ b u f f e r s
 *
 * Finalize the cached protocol header for the buffered iovector
 * (fragment length, alloc hint, client-side cancel/alert flag, optional
 * auth trailer hookup) and send the fragment over the association.
 *
 * call_rep — the call whose buffered_output iovector is flushed.
 * status   — out; rpc_s_ok on success, else the failing status from the
 *            cancel-timer start or left for rpc__cn_assoc_send_frag to set.
 */
PRIVATE void rpc__cn_transmit_buffers
(
    rpc_cn_call_rep_p_t call_rep,
    unsigned32 *status
)
{
    rpc_cn_packet_p_t header_p;

    /*
     * Write the bytecount accumulated thus far into the fragment
     * length field of the cached protocol header.
     */
    *status = rpc_s_ok;
    header_p = (rpc_cn_packet_p_t) RPC_CN_CREP_SEND_HDR (call_rep);
    RPC_CN_PKT_FRAG_LEN (header_p) = RPC_CN_CREP_ACC_BYTCNT (call_rep);

    /*
     * Set the alloc hint; appears that NetApp's RPC implementation
     * depends on this.
     */
    RPC_CN_PKT_ALLOC_HINT (header_p) = RPC_CN_CREP_ACC_BYTCNT (call_rep)
        - RPC_CN_CREP_SIZEOF_HDR (call_rep);

    if (RPC_CALL_IS_CLIENT (((rpc_call_rep_t *) call_rep)))
    {
        /*
         * Check for pending cancels if sending a request.  Set the flag
         * in the request header to forward the cancel if there is one
         * pending and this is the first fragment of the request.
         */
        if (RPC_CN_PKT_FLAGS (header_p) & RPC_C_CN_FLAGS_FIRST_FRAG)
        {
            if (call_rep->u.client.cancel.local_count)
            {
                /* A cancel is already queued locally — forward it. */
                RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
                    ("(rpc__cn_transmit_buffers) setting alert pending bit in request header for queued cancel\n"));
                RPC_CN_PKT_FLAGS (header_p) |= RPC_C_CN_FLAGS_ALERT_PENDING;
                call_rep->u.client.cancel.local_count--;
            }
            else
            {
                /*
                 * No queued cancel: probe for one delivered to this
                 * thread right now; if caught, forward it and start the
                 * cancel timer (which may set *status).
                 */
                DCETHREAD_TRY
                {
                    dcethread_checkinterrupt ();
                }
                DCETHREAD_CATCH (dcethread_interrupt_e)
                {
                    RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
                        ("(rpc__cn_transmit_buffers) setting alert pending bit in request header for cancel just detected\n"));
                    RPC_CN_PKT_FLAGS (header_p) |= RPC_C_CN_FLAGS_ALERT_PENDING;
                    rpc__cn_call_start_cancel_timer (call_rep, status);
                }
                DCETHREAD_ENDTRY
            }
            /* Bail out if starting the cancel timer failed. */
            if (*status != rpc_s_ok)
            {
                return;
            }
        }
        RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL,
            ("(rpc__cn_transmit_buffers) setting flag indicating first frag has been sent\n"));
        call_rep->u.client.cancel.server_is_accepting = true;
        call_rep->num_pkts = 0;
    }

    /*
     * If security was requested attach the authentication trailer
     * to the last iovector element.  Make sure to add padding, if
     * required to the stub data to ensure the trailer starts on a
     * 4-byte boundary.
     */
    if (call_rep->sec != NULL)
    {
        rpc_iovector_elt_p_t iov_p;
        rpc_cn_auth_tlr_t *auth_tlr;

        /*
         * Remove the authentication trailer size from the header
         * iovector element.  This was added by
         * RPC_CN_CREP_ADJ_IOV_FOR_TLR.
         */
        (RPC_CN_CREP_IOV(call_rep)[0]).data_len -= call_rep->prot_tlr->data_size;

        /*
         * Now adjust some fields in the auth trailer.  The auth
         * trailer must start on a 4-byte boundary.  Pad the user, or
         * stub, data to make it so.  The amount of padding is
         * contained in the auth trailer so that the receiver can
         * determine the real user data size.
         */
        auth_tlr = (rpc_cn_auth_tlr_t *)call_rep->prot_tlr->data_p;
        auth_tlr->stub_pad_length =
            (4 - ((RPC_CN_CREP_ACC_BYTCNT (call_rep)
                   - call_rep->prot_tlr->data_size) & 0x03)) & 0x03;
        (RPC_CN_CREP_IOV(call_rep)[RPC_CN_CREP_IOVLEN(call_rep) - 2]).data_len
            += auth_tlr->stub_pad_length;
        RPC_CN_PKT_FRAG_LEN (header_p) += auth_tlr->stub_pad_length
            - RPC_CN_CREP_SIZEOF_TLR_PAD (call_rep);

        /*
         * Hook the auth trailer iovector element after the last
         * iovector element.
         */
        iov_p = &(RPC_CN_CREP_IOV(call_rep)[RPC_CN_CREP_IOVLEN(call_rep) - 1]);
        iov_p->buff_dealloc = NULL;
        iov_p->data_len = call_rep->prot_tlr->data_size
            - RPC_CN_CREP_SIZEOF_TLR_PAD (call_rep) ;
        iov_p->data_addr = (byte_p_t) call_rep->prot_tlr->data_p;
    }

    /*
     * Send the buffers in the iovector out over the association.
     */
    rpc__cn_assoc_send_frag (call_rep->assoc,
                             &(call_rep->buffered_output.iov),
                             call_rep->sec,
                             status);

    /*
     * Clear the first frag flag bit in the cached protocol header
     * so that subsequent packets will not have the bit set.
     */
    RPC_CN_PKT_FLAGS (header_p) &= ~RPC_C_CN_FLAGS_FIRST_FRAG;

    /*
     * Update the count of packets sent and received for this call.
     */
    call_rep->num_pkts++;
}
/*
 * R P C _ _ S E R V E R _ F W D _ R E S O L V E _ D E L A Y E D
 *
 * Remove specified packet from the list of delayed packets
 * and do what we are told with it.
 *
 * The packet is looked up by activity uuid under fwd_list_mutex and
 * unlinked from the list; *fwd_action then selects drop / reject /
 * forward.  *status is rpc_s_not_found when no matching packet exists,
 * rpc_s_not_supported for an unknown action, else rpc_s_ok.  The
 * packet's rqe and socket reference are always released on success.
 */
PRIVATE void rpc__server_fwd_resolve_delayed(
    dce_uuid_p_t actuuid,
    rpc_addr_p_t fwd_addr,
    rpc_fwd_action_t *fwd_action,
    unsigned32 *status)
{
    rpc_dg_sock_pool_elt_p_t sp;
    /* Sentinel: lets the paranoia check below detect an impossible
     * "fell through without finding the packet" state. */
    rpc_dg_recvq_elt_p_t rqe = (rpc_dg_recvq_elt_p_t)-1;
    rpc_dg_pkt_hdr_p_t hdrp;
    pkt_list_element_t *ep, *last_ep = NULL;
    unsigned32 st;

    /* get the requested packet from the list */
    *status = rpc_s_not_found;

    RPC_MUTEX_LOCK(fwd_list_mutex);
    ep = delayed_pkt_head;
    while (ep != NULL)
    {
        hdrp = ep->rqe->hdrp;
        if (dce_uuid_equal(&(hdrp->actuid), actuuid, &st) && (st == rpc_s_ok))
        {
            /* found - remove it from the list */
            rqe = ep->rqe;
            sp = ep->sp;
            if (last_ep == NULL)
            {
                delayed_pkt_head = ep->next;
            }
            else
            {
                last_ep->next = ep->next;
            }
            /* Only the list element is freed here; the rqe itself is
             * consumed below. */
            RPC_MEM_FREE(ep, RPC_C_MEM_UTIL);
            *status = rpc_s_ok;
            break;
        }
        last_ep = ep;
        ep = ep->next;
    }
    RPC_MUTEX_UNLOCK(fwd_list_mutex);

    if (*status != rpc_s_ok)
    {
        /* No matching delayed packet; nothing to release. */
        return;
    }

    /*
     * Do what we're told to do with this packet.
     */
    switch (*fwd_action)
    {
    case rpc_e_fwd_drop:
        RPC_DBG_PRINTF(rpc_e_dbg_general, 10,
            ("(rpc__server_fwd_resolve_delayed) dropping (ptype=%s) [%s]\n",
            rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(rqe->hdrp)),
            rpc__dg_act_seq_string(rqe->hdrp)));
        break;

    case rpc_e_fwd_reject:
        fwd_reject(sp, rqe);
        break;

    case rpc_e_fwd_forward:
        fwd_forward(sp, rqe, fwd_addr);
        break;

    default:
        *status = rpc_s_not_supported;
        break;
    }

    /* Drop the socket reference taken when the packet was delayed. */
    rpc__dg_network_sock_release(&sp);

    /* Paranoia: rqe must have been assigned in the search loop above
     * (the not-found case returned early). */
    if (rqe == (rpc_dg_recvq_elt_p_t)-1)
    {
        fprintf(stderr, "%s: bad rqe: aborting\n", __PRETTY_FUNCTION__);
        abort();
    }
    rpc__dg_pkt_free_rqe(rqe, NULL);
    return;
}
/*
 * R P C _ _ D G _ P L O G _ D U M P
 *
 * Dump the in-memory datagram packet log (rpc_g_dg_pkt_log) through the
 * dg_pktlog debug channel: one header line, then one formatted line per
 * logged packet header.  Stops at the first entry with a zero timestamp
 * (unused slot).  Caller must hold the global RPC lock (asserted).
 */
PRIVATE void rpc__dg_plog_dump(void)
{
    unsigned16 i;
    unsigned32 st;
    /* Indexed by p->lossy_action: */
    static char *lossy_action = "d?r ";  /* (0)drop, (1)?, (2)rexmit, (3)normal */

    RPC_LOCK_ASSERT(0);

    if (rpc_g_dg_pkt_log == NULL)
    {
        RPC_DBG_PRINTF(rpc_e_dbg_dg_pktlog, 1, (
            "rpc__dg_plog_dump called, but DG Pkt Logging never enabled\n")
        );
        return;
    }

    RPC_DBG_PRINTF(rpc_e_dbg_dg_pktlog, 1, (
        "tstamp ver ptyp f1 f2 seq/fnum/sn ihnt ahnt len interface/ver/op activity sboot object drep at\n")
    );
    RPC_DBG_PRINTF(rpc_e_dbg_dg_pktlog, 1, (
        "---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n")
    );

    for (i = 0; i < RPC_C_DG_PKT_LOG_SIZE; i++)
    {
        pktlog_elt_p_t p = &rpc_g_dg_pkt_log[i];
        unsigned_char_p_t obj, iface, act;
        rpc_dg_pkt_hdr_p_t hdrp;
#ifndef MISPACKED_HDR
        hdrp = (rpc_dg_pkt_hdr_p_t) &p->hdr;
#else
        hdrp = converted local rep of raw hdr
#endif
        /* A zero timestamp marks the first never-used slot; the log is
         * full up to here only. */
        if (p->timestamp == 0)
            break;

        /* NOTE(review): st from these conversions is not checked; a
         * failed conversion would pass an undefined string pointer on. */
        dce_uuid_to_string(&hdrp->object, &obj, &st);
        dce_uuid_to_string(&hdrp->if_id, &iface, &st);
        dce_uuid_to_string(&hdrp->actuid, &act, &st);

        RPC_DBG_PRINTF(rpc_e_dbg_dg_pktlog, 1, (
            "%08x %c%c%1u %-4.4s %02x %02x %08x/%04x/%04x %04x %04x %4d %s/%02u/%03u %s %9u %s %02x%02x%02x %02x\n",
            p->timestamp,
            /* High bit of the version byte flags a rexmit ('R');
             * otherwise show the lossy-action letter. */
            (hdrp->_rpc_vers & 0x80) ? 'R' : lossy_action[p->lossy_action],
            /* '*' marks the most recently written log slot. */
            ((i + 1) % RPC_C_DG_PKT_LOG_SIZE == pkt_log_index) ? '*' : ' ',
            hdrp->_rpc_vers & 0x7f,
            rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(hdrp)),
            hdrp->flags, hdrp->flags2,
            hdrp->seq, hdrp->fragnum,
            hdrp->serial_hi << 8 | hdrp->serial_lo,
            hdrp->ihint, hdrp->ahint, hdrp->len,
            iface, hdrp->if_vers, hdrp->opnum,
            act, hdrp->server_boot, obj,
            hdrp->drep[0], hdrp->drep[1], hdrp->drep[2],
            hdrp->auth_proto)
        );

        rpc_string_free(&obj, &st);
        rpc_string_free(&act, &st);
        rpc_string_free(&iface, &st);
    }
}
/*
 * R P C _ _ D G _ F W D _ P K T
 *
 * Ask the endpoint mapper's registered forwarding map function what to
 * do with a received datagram packet, and carry out the decision.
 *
 * Returns FWD_PKT_NOTDONE when the packet was not consumed (caller
 * should process it normally), FWD_PKT_DONE when it was rejected or
 * forwarded, or FWD_PKT_DELAYED when it was queued for later
 * resolution.  Aborts on an unrecognized fwd_action.
 */
PRIVATE unsigned32 rpc__dg_fwd_pkt (
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe)
{
    rpc_dg_pkt_hdr_p_t hdrp = rqe->hdrp;
    rpc_if_id_t if_id;
    unsigned32 rpc_prot_vers_major;
    unsigned32 rpc_prot_vers_minor;
    rpc_addr_p_t fwd_addr;
    rpc_fwd_action_t fwd_action;
    unsigned32 st;

    /*
     * First determine whether or not the pkt is for the forwarder server.
     * There are a few approaches that could be taken.  We can (a) filter
     * based on interface id (i.e. if the pkt is for a interface that
     * is supported by the forwarder server, including conv_, then we
     * can handle it just like a normal pkt).  We could (b) try to do
     * something based on a combination of activity id and interface
     * ids (e.g. is the pkt for an activity that we know about...).  Lastly
     * (perhaps) we could (c) see if we can find a forwarder match and
     * if not then just handle it normally.
     *
     * For now we use method (c) since most pkts to the forwarder will
     * be for forwarding not for operations on the forwarder.  Note:
     * NCS 1.5.1 RPC_C_DG_PT_ACK and RPC_C_DG_PT_REJECT pkts do not have
     * a valid interface id so we just assume that they must be for the
     * forwarder server (i.e. they couldn't have ever been forwarded).
     * Also, a pkt with interface id nil is not valid so don't try to
     * forward it.
     */
    if (RPC_DG_HDR_INQ_PTYPE(hdrp) == RPC_C_DG_PT_ACK
        || RPC_DG_HDR_INQ_PTYPE(hdrp) == RPC_C_DG_PT_REJECT
        || UUID_IS_NIL(&hdrp->if_id, &st))
    {
        return (FWD_PKT_NOTDONE);
    }

    /* Build the interface id and protocol version the map function needs. */
    if_id.uuid = hdrp->if_id;
    if_id.vers_major = RPC_IF_VERS_MAJOR(hdrp->if_vers);
    if_id.vers_minor = RPC_IF_VERS_MINOR(hdrp->if_vers);
    rpc_prot_vers_major = RPC_IF_VERS_MAJOR(RPC_C_DG_PROTO_VERS);
    rpc_prot_vers_minor = RPC_IF_VERS_MINOR(RPC_C_DG_PROTO_VERS);

    /*
     * Invoke the endpoint mapper's registered forwarding map
     * function to locate an appropriate forwarding addr.
     */
    /* !!! RPC_UNLOCK_ASSERT(0); couldn't fix mainline com.h so forget it for now */
    (* rpc_g_fwd_fn) (
        &hdrp->object,
        &if_id,
        &ndr_g_transfer_syntax.id,
        (rpc_protocol_id_t) RPC_C_PROTOCOL_ID_NCADG,
        rpc_prot_vers_major,
        rpc_prot_vers_minor,
        (rpc_addr_p_t) &rqe->from,
        &hdrp->actuid,
        &fwd_addr,
        &fwd_action,
        &st);

    if (st != rpc_s_ok)
    {
        RPC_DBG_GPRINTF(
            ("(rpc__dg_fwd_pkt) fwd map function returned error (st=%08lx, ptype=%s) [%s]\n",
            st,
            rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(hdrp)),
            rpc__dg_act_seq_string(hdrp)));
        return (FWD_PKT_NOTDONE);
    }

    /*
     * Do what we're told to do with this packet.
     */
    switch (fwd_action)
    {
    case rpc_e_fwd_drop:
        RPC_DBG_PRINTF(rpc_e_dbg_general, 10,
            ("(rpc__dg_forward_pkt) dropping (ptype=%s) [%s]\n",
            rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(hdrp)),
            rpc__dg_act_seq_string(hdrp)));
        return (FWD_PKT_NOTDONE);

    case rpc_e_fwd_reject:
        fwd_reject(sp, rqe);
        return (FWD_PKT_DONE);

    case rpc_e_fwd_forward:
        fwd_forward(sp, rqe, fwd_addr);
        return (FWD_PKT_DONE);

    case rpc_e_fwd_delayed:
        /* Packet is queued on the delayed list; resolved later by
         * rpc__server_fwd_resolve_delayed(). */
        fwd_delayed(sp, rqe);
        return(FWD_PKT_DELAYED);

    default:
        fprintf(stderr, "%s: unhandled fwd_action %d[%x]; aborting\n",
            __PRETTY_FUNCTION__, fwd_action, fwd_action);
        abort();
    }
}
INTERNAL void rpc__ntlmauth_bnd_set_auth ( unsigned_char_p_t server_name, rpc_authn_level_t level, rpc_auth_identity_handle_t auth_ident, rpc_authz_protocol_id_t authz_prot, rpc_binding_handle_t binding_h, rpc_auth_info_p_t *infop, unsigned32 *stp ) { unsigned32 st = rpc_s_ok; rpc_ntlmssp_auth_ident_t_p auth_info = NULL; rpc_ntlmauth_info_p_t ntlmauth_info = NULL; gss_name_t gss_server_name = {0}; unsigned char *str_server_name = NULL; gss_buffer_desc username_buf = {0}; gss_name_t gss_user_name = NULL; int gss_rc = 0; OM_uint32 minor_status = 0; gss_OID_set_desc desired_mech; gss_OID_set ret_mech; gss_cred_id_t cred_handle = GSS_C_NO_CREDENTIAL; OM_uint32 time_rec = 0; gss_OID_desc gss_ntlm_oid_desc = {0}; gss_OID_desc gss_cred_opt_password_oid_desc = {0}; gss_buffer_desc auth_buffer = {0}; RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_ROUTINE_TRACE, ("(rpc__gssauth_bnd_set_auth)\n")); rpc_g_ntlmauth_alloc_count++; RPC_MEM_ALLOC(ntlmauth_info, rpc_ntlmauth_info_p_t, sizeof (*ntlmauth_info), RPC_C_MEM_NTLMAUTH_INFO, RPC_C_MEM_WAITOK); memset(ntlmauth_info, 0, sizeof(*ntlmauth_info)); if (authz_prot != rpc_c_authz_name) { st = rpc_s_authn_authz_mismatch; goto poison; } if ((level != rpc_c_authn_level_connect) && (level != rpc_c_authn_level_pkt_integrity) && (level != rpc_c_authn_level_pkt_privacy)) { st = rpc_s_unsupported_authn_level; goto poison; } if (server_name == NULL || auth_ident == NULL) { st = rpc_s_invalid_arg; goto poison; } auth_info = (rpc_ntlmssp_auth_ident_t_p)auth_ident; if (authz_prot == rpc_c_authz_name) { gss_buffer_desc input_name; /* GSS_KRB5_NT_PRINCIPAL_NAME */ gss_OID_desc nt_principal = {10, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x01"}; int gss_rc = 0; OM_uint32 minor_status = 0; if (server_name == NULL) { rpc_mgmt_inq_server_princ_name(binding_h, rpc_c_authn_winnt, &str_server_name, &st); if (st != rpc_s_ok) { goto poison; } } else { str_server_name = rpc_stralloc(server_name); } input_name.value = (void *)str_server_name; 
input_name.length = strlen((char *)str_server_name); gss_rc = gss_import_name(&minor_status, &input_name, &nt_principal, &gss_server_name); if (gss_rc != GSS_S_COMPLETE) { char msg[256] = {0}; rpc__ntlmauth_error_map(gss_rc, minor_status, GSS_C_NO_OID, msg, sizeof(msg), &st); RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_GENERAL, ("(rpc__gssauth_bnd_set_auth): import: %s\n", msg)); goto poison; } } gss_ntlm_oid_desc.length = GSS_MECH_NTLM_LEN; gss_ntlm_oid_desc.elements = GSS_MECH_NTLM; gss_cred_opt_password_oid_desc.length = GSS_CRED_OPT_PW_LEN; gss_cred_opt_password_oid_desc.elements = GSS_CRED_OPT_PW; username_buf.value = auth_info->User; username_buf.length = auth_info->UserLength; gss_rc = gss_import_name(&minor_status, &username_buf, GSS_C_NT_USER_NAME, &gss_user_name); if (gss_rc != GSS_S_COMPLETE) { char msg[256] = {0}; rpc__ntlmauth_error_map(gss_rc, minor_status, GSS_C_NO_OID, msg, sizeof(msg), &st); RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_GENERAL, ("(rpc__ntlmauth_bnd_set_auth): import: %s\n", msg)); goto poison; } desired_mech.elements = (gss_OID)&gss_ntlm_oid_desc; desired_mech.count = 1; gss_rc = gss_acquire_cred(&minor_status, gss_user_name, 0, &desired_mech, GSS_C_INITIATE, &cred_handle, &ret_mech, &time_rec); if (gss_rc != GSS_S_COMPLETE) { char msg[256] = {0}; rpc__ntlmauth_error_map(gss_rc, minor_status, GSS_C_NO_OID, msg, sizeof(msg), &st); RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_GENERAL, ("(rpc__ntlmauth_bnd_set_auth): import: %s\n", msg)); goto poison; } auth_buffer.value = auth_info; auth_buffer.length = sizeof(*auth_info); gss_rc = gssspi_set_cred_option(&minor_status, cred_handle, (gss_OID)&gss_cred_opt_password_oid_desc, &auth_buffer); if (gss_rc != GSS_S_COMPLETE) { char msg[256] = {0}; rpc__ntlmauth_error_map(gss_rc, minor_status, GSS_C_NO_OID, msg, sizeof(msg), &st); RPC_DBG_PRINTF(rpc_e_dbg_auth, RPC_C_CN_DBG_AUTH_GENERAL, ("(rpc__ntlmauth_bnd_set_auth): import: %s\n", msg)); goto poison; } 
ntlmauth_info->auth_info.server_princ_name = str_server_name; ntlmauth_info->auth_info.authn_level = level; ntlmauth_info->auth_info.authn_protocol = rpc_c_authn_winnt; ntlmauth_info->auth_info.authz_protocol = authz_prot; ntlmauth_info->auth_info.is_server = 0; ntlmauth_info->auth_info.u.auth_identity = auth_ident; ntlmauth_info->auth_info.refcount = 1; ntlmauth_info->gss_server_name = gss_server_name; ntlmauth_info->gss_creds = cred_handle; if (gss_user_name) { gss_release_name(&minor_status, &gss_user_name); } *infop = &ntlmauth_info->auth_info; *stp = st; return; poison: *infop = NULL; *stp = st; return; }