/*
 * Uncache (tear down) a cached server call handle (SCALL).
 *
 * The caller must hold the SCALL's call lock and the SCALL must be in
 * the "idle" or "orphan" state.  Because the locking hierarchy differs
 * for the two SCALL flavors (client-side callback vs. normal server
 * side), this routine uses try-locks where it would otherwise acquire
 * locks out of order; if a try-lock fails, it backs out and returns
 * false so the caller can retry later.
 *
 * Returns true iff the SCALL was fully uncached.
 */
INTERNAL boolean32 scall_uncache ( rpc_dg_scall_p_t scall )
{
    unsigned32 st;
    boolean b;

    /*
     * We need the global lock, but taking it while holding a call lock
     * would violate the lock hierarchy — so try-lock and bail out on
     * contention.
     */
    RPC_TRY_LOCK(&b);
    if (! b)
    {
        RPC_DBG_GPRINTF(("(scall_uncache) couldn't get global lock\n"));
        return false;
    }

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /* Only idle or orphaned calls may be uncached. */
    assert(scall->c.state == rpc_e_dg_cs_idle || scall->c.state == rpc_e_dg_cs_orphan);

    if (scall->c.is_cbk)
    {
        /*
         * This is a *client side* callback scall; dissociate from our
         * cbk_ccall if necessary.
         */
        if (scall->cbk_ccall != NULL)
        {
            rpc_dg_ccall_p_t ccall = scall->cbk_ccall;

            assert(ccall->cbk_scall == scall);

            /*
             * Acquire the callback ccall lock.  Note the locking hierarchy
             * for this type of call handle pairing is: cbk_ccall, is_cbk scall
             * (see dg.h) — we already hold the scall lock, hence the
             * try-lock with back-out on failure.
             */
            RPC_DG_CALL_TRY_LOCK(&ccall->c, &b);
            if (! b)
            {
                RPC_DBG_GPRINTF(
                    ("(scall_uncache) couldn't get cbk_scall->cbk_ccall lock\n"));
                RPC_UNLOCK(0);
                return false;
            }

            /* Break the mutual references between the paired handles. */
            ccall->cbk_start = false;
            RPC_DG_CCALL_RELEASE(&scall->cbk_ccall);
            RPC_DG_SCALL_RELEASE_NO_UNLOCK(&ccall->cbk_scall);
        }
    }
    else
    {
        /*
         * This is a normal (server side) scall.
         */

        /*
         * If this server side scall has been part of a callback back
         * to the client, free up the cached *server side* callback ccall
         * resources.
         */
        if (scall->cbk_ccall != NULL)
        {
            rpc_dg_ccall_p_t ccall = scall->cbk_ccall;

            assert(ccall->cbk_scall == scall);

            /*
             * Acquire the callback ccall lock.  Note the locking hierarchy
             * for this type of call handle pairing is: scall, is_cbk ccall
             * (see dg.h) — we hold the scall lock, so a plain lock is safe
             * here.
             */
            RPC_DG_CALL_LOCK(&ccall->c);

            rpc__dg_ccall_free_prep(ccall);

            /*
             * Release the reference the CCALL has to its originating SCALL.
             */
            RPC_DG_SCALL_RELEASE_NO_UNLOCK(&ccall->cbk_scall);

            /*
             * Release the reference the SCALL has to the CCALL it used for
             * the callback.  Then call free_handle, which will stop the
             * timer and release the client binding handles reference to
             * the CCALL.
             */
            RPC_DG_CCALL_RELEASE(&scall->cbk_ccall);
            RPC_BINDING_RELEASE((rpc_binding_rep_p_t *) &ccall->h, &st);
        }

        /*
         * Dissociate the scall from its scte if necessary.
         * Presumably, the only time that the scall won't have a scte is
         * if the call had been orphaned, though we don't count on that.
         */
        if (scall->scte != NULL)
        {
            release_scall_from_scte(scall);

            /*
             * Release the SCALL's reference to the SCTE.
             */
            RPC_DG_SCT_RELEASE(&scall->scte);
        }
    }

    /*
     * Common scall uncache processing.
     */

    RPC_DBG_PRINTF(rpc_e_dbg_general, 3,
        ("(scall_uncache) Freeing cached SCALL [%s]\n",
        rpc__dg_act_seq_string(&scall->c.xq.hdr)));

    /*
     * Dissociate the scall from the server binding handle if necessary.
     */
    if (scall->h != NULL)
    {
        RPC_DG_SCALL_RELEASE_NO_UNLOCK(&scall->h->scall);
        RPC_BINDING_RELEASE((rpc_binding_rep_p_t *) &scall->h, &st);
    }

    /*
     * Stop the scall's timer and dissociate it from the scall.
     * This releases the last (timer's) reference via the local pointer.
     */
    rpc__timer_clear(&scall->c.timer);
    RPC_DG_SCALL_RELEASE(&scall);

    RPC_UNLOCK(0);
    return true;
}
PRIVATE void rpc__cn_call_executor (pointer_t arg, boolean32 call_was_queued ATTRIBUTE_UNUSED) { rpc_binding_rep_t *binding_r; rpc_cn_call_rep_t *call_r; rpc_iovector_t iovector; dce_uuid_t type_uuid; rpc_mgr_epv_t manager_epv; rpc_v2_server_stub_epv_t server_stub_epv; rpc_if_rep_p_t if_spec_rep; unsigned32 flags; unsigned32 max_calls; unsigned32 max_rpc_size; rpc_if_callback_fn_t if_callback; unsigned32 status; RPC_LOG_CN_CTHD_NTR; RPC_DBG_PRINTF (rpc_e_dbg_general, RPC_C_CN_DBG_GENERAL, ("CN: call_rep->%x call executor running ... %s queued\n", arg, (call_was_queued ? "WAS" : "WAS NOT"))); /* * The arg passed in is really a call rep. */ call_r = (rpc_cn_call_rep_t *) arg; /* * Release the call rep lock which was acquired for us in the * common code. */ RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); /* * If there is an object uuid, see if there's a type uuid * associated with it. */ rpc_object_inq_type (&call_r->binding_rep->obj, &type_uuid, &status); if ((status != rpc_s_object_not_found) && (status != rpc_s_ok)) { RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); RPC_CN_LOCK (); rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status); RPC_CN_UNLOCK (); goto CLEANUP; } /* * Get the if rep and the server stub and manager EPV. */ rpc__if_lookup2 (call_r->u.server.if_id, call_r->u.server.if_vers, &type_uuid, &call_r->u.server.ihint, &if_spec_rep, &server_stub_epv, &manager_epv, &flags, &max_calls, &max_rpc_size, &if_callback, &status); if (status != rpc_s_ok) { RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); RPC_CN_LOCK (); rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status); RPC_CN_UNLOCK (); goto CLEANUP; } /* * If the operation number is out of range, indicate a fault to * the protocol service, otherwise process the incoming packet(s). 
*/ if (call_r->opnum >= if_spec_rep->opcnt) { RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); RPC_CN_LOCK (); rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, rpc_s_op_rng_error); RPC_CN_UNLOCK (); goto CLEANUP; } /* * Receive the first packet. */ rpc__cn_call_receive ((rpc_call_rep_t *) call_r, &iovector.elt[0], &status); if (status != rpc_s_ok) { RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); RPC_CN_LOCK (); rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, rpc_s_op_rng_error); RPC_CN_UNLOCK (); goto CLEANUP; } /* * Mark the call as having executed. */ call_r->call_executed = true; /* * Enable posting of cancels to this call executor thread. * This will also post any queued cancels. */ RPC_DBG_PRINTF (rpc_e_dbg_cancel, RPC_C_CN_DBG_CANCEL, ("(rpc__cn_call_executor) call_rep->%x enabling posting of cancels and posting any queued cancels\n", call_r)); RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_enable_post ((rpc_call_rep_p_t) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); /* * Dispatch appropriately depending on the stub version. */ switch (if_spec_rep->stub_rtl_if_vers) { /* * If this is an old v0 or v1 stub runtime interface * then do the dirty work out of line. */ case 0: case 1: /* * rpc_m_pre_v2_ifspec * "(%s) Pre-v2 interface spec" */ RPC_DCE_SVC_PRINTF (( DCE_SVC(RPC__SVC_HANDLE, "%s"), rpc_svc_server_call, svc_c_sev_fatal | svc_c_action_abort, rpc_m_pre_v2_ifspec, "rpc__cn_call_executor" )); break; /* * This is the v2 (new) stub runtime interface. 
*/ case 2: RPC_LOG_SERVER_STUB_PRE; ((*server_stub_epv[call_r->opnum])) ((handle_t) call_r->binding_rep, (rpc_call_handle_t) call_r, &iovector.elt[0], &(RPC_CN_ASSOC_NDR_FORMAT (call_r->assoc)), &call_r->transfer_syntax, manager_epv, &status); RPC_LOG_SERVER_STUB_POST; break; /* * Unknown version */ default: /* * rpc_m_unk_ifspec * "(%s) Unknown interface spec version" */ RPC_DCE_SVC_PRINTF (( DCE_SVC(RPC__SVC_HANDLE, "%s"), rpc_svc_server_call, svc_c_sev_fatal | svc_c_action_abort, rpc_m_pre_v2_ifspec, "rpc__cn_call_executor" )); break; } /* * Check for an error while in the server stub but before the * manager routine was entered. */ if (status != rpc_s_ok) { RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); RPC_CN_LOCK (); rpc__cn_call_reject ((rpc_call_rep_p_t) call_r, status); RPC_CN_UNLOCK (); goto CLEANUP; } /* * If the stub returned successfully, end the call and free the binding handle. */ binding_r = (rpc_binding_rep_t *) call_r->binding_rep; RPC_CALL_LOCK ((rpc_call_rep_t *) call_r); rpc__cthread_cancel_caf ((rpc_call_rep_t *) call_r); RPC_CALL_UNLOCK ((rpc_call_rep_t *) call_r); CLEANUP: binding_r = (rpc_binding_rep_t *) call_r->binding_rep; rpc__cn_call_end ((rpc_call_rep_p_t *) &call_r, &status); RPC_LOCK (0); RPC_BINDING_RELEASE (&binding_r, &status); RPC_UNLOCK (0); RPC_LOG_CN_CTHD_XIT; }