/** * Ensures the existence of a client-side transport on the TCP connection. * * @param[in] xprt Server-side RPC transport. * @retval true Success. * @retval false System error. `log_start()` called. */ static bool up7_ensureClientTransport( struct SVCXPRT* const xprt) { bool status; if (NULL == clnt) { /* * Create a client-side RPC transport on the TCP connection. */ do { clnt = clnttcp_create(&xprt->xp_raddr, LDMPROG, SEVEN, &xprt->xp_sock, MAX_RPC_BUF_NEEDED, 0); /* TODO: adjust sending buffer size in above */ } while (clnt == NULL && rpc_createerr.cf_stat == RPC_TIMEDOUT); if (clnt == NULL ) { LOG_START2("Couldn't create client-side transport to downstream LDM-7 on " "%s%s", hostbyaddr(&xprt->xp_raddr), clnt_spcreateerror("")); status = false; } else { status = true; } } return status; }
/* * Handles incoming LDM-4 RPC NULLPROC requests (only). This method is * directly and repeatedly invoked by the RPC layer after svc_run(3NSL) is * invoked. * * rqstp The RPC request. * transp The server-side RPC transport. */ void ldmprog_4(struct svc_req *rqstp, register SVCXPRT *transp) { if (rqstp->rq_proc == 0) { /* NULLPROC */ unotice("ldmprog_4: ldmping from %s", hostbyaddr((struct sockaddr_in*)svc_getcaller(transp))); (void)svc_sendreply(transp, (xdrproc_t) xdr_void, (char *)NULL); } else { svcerr_noproc(transp); } }
/**
 * Handles an incoming LDM-6 HIYA request: (re)opens the product-queue for
 * writing, initializes the downstream-LDM module, and intersects the offered
 * product-class with the local ACCEPT entries to form the reply.
 *
 * NOTE(review): this handler terminates the process via exit() on
 * product-queue, initialization, or validation failures, and on an empty
 * offer/ACCEPT intersection — it only returns normally on success or RECLASS.
 *
 * @param offered  Product-class offered by the upstream LDM.
 * @param rqstp    The RPC request.
 * @return         Pointer to a static reply (OK with max-HEREIS size, or
 *                 RECLASS with the acceptable product-class). Never NULL on
 *                 return; error paths exit() instead.
 */
hiya_reply_t* hiya_6_svc( prod_class_t *offered, struct svc_req *rqstp)
{
    const char* const pqfname = getQueuePath();
    static hiya_reply_t reply;                  /* static: returned to caller */
    SVCXPRT * const xprt = rqstp->rq_xprt;
    struct sockaddr_in *upAddr =
            (struct sockaddr_in*) svc_getcaller(xprt);
    const char *upName = hostbyaddr(upAddr);
    int error;
    int isPrimary;
    unsigned int maxHereis;
    static prod_class_t *accept;                /* static: may be referenced
                                                 * by the returned reply */

    /*
     * Open the product-queue for writing. It will be closed by cleanup()
     * during process termination.
     */
    if (pq) {
        (void) pq_close(pq);
        pq = NULL;
    }
    error = pq_open(pqfname, PQ_DEFAULT, &pq);
    if (error) {
        err_log_and_free(ERR_NEW2(error, NULL,
                "Couldn't open product-queue \"%s\" for writing: %s", pqfname,
                PQ_CORRUPT == error
                        ? "The product-queue is inconsistent"
                        : strerror(error)), ERR_FAILURE);
        svcerr_systemerr(xprt);
        svc_destroy(xprt);
        exit(error);
    }
    /* else */
    error = down6_init(upName, upAddr, pqfname, pq);
    if (error) {
        uerror("Couldn't initialize downstream LDM");
        svcerr_systemerr(xprt);
        svc_destroy(xprt);
        exit(error);
    }
    else {
        uinfo("Downstream LDM initialized");
    }

    /*
     * The previous "accept" is freed here -- rather than freeing the
     * soon-to-be-allocated "accept" at the end of its block -- because it can
     * be used in the reply.
     */
    if (accept) {
        free_prod_class(accept); /* NULL safe */
        accept = NULL;
    }
    error = lcf_reduceToAcceptable(upName, inet_ntoa(upAddr->sin_addr),
            offered, &accept, &isPrimary);

    /* Primary senders get an unlimited HEREIS size; alternates get 0. */
    maxHereis = isPrimary ? UINT_MAX : 0;

    if (error) {
        serror("Couldn't validate HIYA");
        svcerr_systemerr(xprt);
        svc_destroy(xprt);
        exit(error);
    }
    else {
        if (ulogIsDebug())
            udebug("intersection: %s", s_prod_class(NULL, 0, accept));

        if (accept->psa.psa_len == 0) {
            /* Nothing acceptable: reject with weak-auth error and exit. */
            uwarn("Empty intersection of HIYA offer from %s (%s) and ACCEPT "
                    "entries", upName, s_prod_class(NULL, 0, offered));
            svcerr_weakauth(xprt);
            svc_destroy(xprt);
            exit(0);
        }
        else {
            error = down6_set_prod_class(accept);
            if (error) {
                if (DOWN6_SYSTEM_ERROR == error) {
                    serror("Couldn't set product class: %s",
                            s_prod_class(NULL, 0, accept));
                }
                else {
                    uerror("Couldn't set product class: %s",
                            s_prod_class(NULL, 0, accept));
                }
                svcerr_systemerr(xprt);
                svc_destroy(xprt);
                exit(EXIT_FAILURE);
            }
            /* else */
            if (clss_eq(offered, accept)) {
                /* Full offer accepted as-is. */
                unotice("hiya6: %s", s_prod_class(NULL, 0, offered));
                reply.code = OK;
                reply.hiya_reply_t_u.max_hereis = maxHereis;
            }
            else {
                /* Partial acceptance: tell the upstream LDM to RECLASS. */
                if (ulogIsVerbose()) {
                    char off[512];
                    char acc[512];
                    /* NOTE(review): comma operator joins these two calls --
                     * behaviorally equivalent to two statements. */
                    (void) s_prod_class(off, sizeof(off), offered),
                    (void) s_prod_class(acc, sizeof(acc), accept);
                    uinfo("hiya6: RECLASS: %s -> %s", off, acc);
                }
                reply.code = RECLASS;
                reply.hiya_reply_t_u.feedPar.prod_class = accept;
                reply.hiya_reply_t_u.feedPar.max_hereis = maxHereis;
            }
        } /* product-intersection != empty set */
    } /* successful acl_check_hiya() */

    return &reply;
}
/**
 * Feeds or notifies a downstream LDM. This function returns either NULL or a
 * reply to be sent to the downstream LDM (e.g., a RECLASS message) or
 * terminates this process (hopefully after sending some data).
 *
 * On the full-success path this function never returns: it sends an OK reply,
 * becomes an up6 feeder/notifier, destroys the transport, and exit()s.
 * Error/RECLASS paths unwind through the goto-cleanup chain at the bottom.
 *
 * @param xprt        [in/out] Pointer to server-side transport handle.
 * @param want        [in] Pointer to subscription by downstream LDM.
 *                    May contain a "signature" product-specification.
 * @param isNotifier  [in] Whether or not the upstream LDM is a feeder or a
 *                    notifier.
 * @param maxHereis   Maximum HEREIS size parameter. Ignored if "isNotifier"
 *                    is true.
 * @return            The reply for the downstream LDM or NULL if no reply
 *                    should be made.
 */
static fornme_reply_t* feed_or_notify(
    SVCXPRT* const            xprt,
    const prod_class_t* const want,
    const int                 isNotifier,
    const max_hereis_t        maxHereis)
{
    struct sockaddr_in  downAddr = *svc_getcaller(xprt);
    ErrorObj*           errObj;
    int                 status;
    char*               downName = NULL;
    prod_class_t*       origSub = NULL;    /* `want` minus any signature */
    prod_class_t*       allowSub = NULL;   /* reduced by ALLOW entries */
    const signaturet*   signature = NULL;  /* restart-point signature, if any */
    UpFilter*           upFilter = NULL;
    fornme_reply_t*     reply = NULL;
    int                 isPrimary;
    static fornme_reply_t theReply;        /* static: returned to caller */
    static prod_class_t*  uldbSub = NULL;  /* static: may be referenced by
                                            * the returned reply */

    /*
     * Clean-up from a (possibly) previous invocation
     */
    (void)memset(&theReply, 0, sizeof(theReply));
    if (uldbSub != NULL) {
        free_prod_class(uldbSub);
        uldbSub = NULL;
    }

    downName = strdup(hostbyaddr(&downAddr));
    if (NULL == downName) {
        LOG_ADD1("Couldn't duplicate downstream host name: \"%s\"",
                hostbyaddr(&downAddr));
        log_log(LOG_ERR);
        svcerr_systemerr(xprt);
        goto return_or_exit;
    }

    /* Tag log messages with the downstream host and feeder/notifier role. */
    set_abbr_ident(downName, isNotifier ? "(noti)" : "(feed)");

    /*
     * Remove any "signature" specification from the subscription.
     */
    if ((errObj = separateProductClass(want, &origSub, &signature)) != NULL) {
        err_log_and_free(errObj, ERR_FAILURE);
        svcerr_systemerr(xprt);
        goto free_down_name;
    }

    /*
     * Get the upstream filter
     */
    errObj = lcf_getUpstreamFilter(downName, &downAddr.sin_addr, origSub,
            &upFilter);
    if (errObj) {
        err_log_and_free(ERR_NEW(0, errObj,
                "Couldn't get \"upstream\" filter"), ERR_FAILURE);
        svcerr_systemerr(xprt);
        goto free_orig_sub;
    }
    if (NULL == upFilter) {
        /* A NULL filter means nothing may be transferred at all. */
        err_log_and_free(ERR_NEW1(0, NULL,
                "Upstream filter prevents data-transfer: %s",
                s_prod_class(NULL, 0, origSub)), ERR_FAILURE);
        svcerr_weakauth(xprt);
        goto free_orig_sub;
    }

    /* TODO: adjust time? */

    /*
     * Reduce the subscription according to what the downstream host is allowed
     * to receive.
     */
    status = lcf_reduceToAllowed(downName, &downAddr.sin_addr, origSub,
            &allowSub);
    if (status == ENOMEM) {
        LOG_SERROR0("Couldn't compute wanted/allowed product intersection");
        log_log(LOG_ERR);
        svcerr_systemerr(xprt);
        goto free_up_filter;
    }
    if (status == EINVAL) {
        /* Bad pattern: reply BADPATTERN rather than terminating. */
        LOG_ADD1("Invalid pattern in product-class: %s",
                s_prod_class(NULL, 0, origSub));
        log_log(LOG_WARNING);
        theReply.code = BADPATTERN;
        reply = &theReply;
        goto free_up_filter;
    }
    assert(status == 0);
    (void) logIfReduced(origSub, allowSub, "ALLOW entries");

    /*
     * Reduce the subscription according to existing subscriptions from the
     * same downstream host and, if `isAntiDosEnabled()` returns `true`,
     * terminate every previously-existing upstream LDM process that's feeding
     * (not notifying) a subset of the subscription to the same IP address.
     *
     * The following relies on atexit()-registered cleanup for removal of the
     * entry from the upstream LDM database.
     */
    isPrimary = maxHereis > UINT_MAX / 2;  /* large HEREIS limit => primary */
    status = uldb_addProcess(getpid(), 6, &downAddr, allowSub, &uldbSub,
            isNotifier, isPrimary);
    if (status) {
        LOG_ADD0("Couldn't add this process to the upstream LDM database");
        log_log(LOG_ERR);
        svcerr_systemerr(xprt);
        goto free_allow_sub;
    }
    (void) logIfReduced(allowSub, uldbSub, "existing subscriptions");

    /*
     * Send a RECLASS reply to the downstream LDM if appropriate.
     */
    if (!clss_eq(origSub, uldbSub)) {
        theReply.code = RECLASS;
        if (0 < uldbSub->psa.psa_len) {
            /*
             * The downstream LDM is allowed less than it requested and was
             * entered into the upstream LDM database.
             */
            (void)uldb_remove(getpid()); /* maybe next time */
            theReply.fornme_reply_t_u.prod_class = uldbSub;
        }
        else {
            /*
             * The downstream LDM isn't allowed anything and wasn't entered
             * into the upstream LDM database.
             */
            static prod_class noSub = { { 0, 0 }, /* TS_ZERO */
                    { 0, 0 }, /* TS_ZERO */
                    { 0, (prod_spec *) NULL } };
            theReply.fornme_reply_t_u.prod_class = &noSub;
        }
        reply = &theReply;
        goto free_allow_sub;
    }

    /*
     * Reply to the downstream LDM that the subscription will be honored.
     */
    theReply.code = OK;
    theReply.fornme_reply_t_u.id = (unsigned) getpid();
    if (!svc_sendreply(xprt, (xdrproc_t)xdr_fornme_reply_t,
            (caddr_t)&theReply)) {
        LOG_ADD0("svc_sendreply(...) failure");
        log_log(LOG_ERR);
        svcerr_systemerr(xprt);
        goto free_allow_sub;
    }

    /*
     * Wait a second before sending anything to the downstream LDM.
     */
    (void) sleep(1);

    /* Point of no return: become the feeder/notifier, then exit. */
    status = isNotifier
            ? up6_new_notifier(xprt->xp_sock, downName, &downAddr, uldbSub,
                    signature, getQueuePath(), interval, upFilter)
            : up6_new_feeder(xprt->xp_sock, downName, &downAddr, uldbSub,
                    signature, getQueuePath(), interval, upFilter,
                    isPrimary);

    svc_destroy(xprt); /* closes the socket */
    exit(status);

    /*
     * Reply and error handling:
     */
    free_allow_sub:
        free_prod_class(allowSub);

    free_up_filter:
        upFilter_free(upFilter);

    free_orig_sub:
        free_prod_class(origSub);

    free_down_name:
        free(downName);

    return_or_exit:
        return reply;
}
/** * Returns an identifier of the remote client. * * @param[in] rqstp Client-request object. */ const char* rpc_getClientId( struct svc_req* const rqstp) { return hostbyaddr(svc_getcaller(rqstp->rq_xprt)); }