static uint32_t immnd_mds_rcv(IMMND_CB *cb, MDS_CALLBACK_RECEIVE_INFO *rcv_info) { uint32_t rc = NCSCC_RC_SUCCESS; IMMSV_EVT *pEvt = (IMMSV_EVT *)rcv_info->i_msg; /*IMMND_SYNC_SEND_NODE *node = NULL; */ pEvt->sinfo.ctxt = rcv_info->i_msg_ctxt; pEvt->sinfo.dest = rcv_info->i_fr_dest; pEvt->sinfo.to_svc = rcv_info->i_fr_svc_id; if (rcv_info->i_rsp_reqd) { pEvt->sinfo.stype = MDS_SENDTYPE_SNDRSP; } /* Put it in IMMND's Event Queue */ if (pEvt->info.immnd.type == IMMND_EVT_A2ND_IMM_INIT) rc = m_NCS_IPC_SEND(&cb->immnd_mbx, (NCSCONTEXT)pEvt, NCS_IPC_PRIORITY_HIGH); else rc = m_NCS_IPC_SEND(&cb->immnd_mbx, (NCSCONTEXT)pEvt, NCS_IPC_PRIORITY_NORMAL); if (NCSCC_RC_SUCCESS != rc) { LOG_WA("NCS IPC Send Failed"); } return rc; }
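/****************************************************************************
 * Illustrative sketch (not part of the original code): the consumer side of
 * the mailbox that immnd_mds_rcv() above posts to. It assumes the usual
 * OpenSAF mailbox macro m_NCS_IPC_NON_BLK_RECEIVE; the dispatcher name
 * immnd_evt_dispatch_sketch() is hypothetical and stands in for the real
 * IMMND event handler, which is not shown here.
 ****************************************************************************/
/* hypothetical per-event handler, assumed to consume and free the event */
static void immnd_evt_dispatch_sketch(IMMND_CB *cb, IMMSV_EVT *evt);

static void immnd_mbx_drain_sketch(IMMND_CB *cb)
{
	IMMSV_EVT *evt = NULL;

	/* Drain everything currently queued. Events posted with
	 * NCS_IPC_PRIORITY_HIGH (e.g. IMMND_EVT_A2ND_IMM_INIT above) are
	 * delivered ahead of NCS_IPC_PRIORITY_NORMAL ones. */
	while (NULL != (evt = (IMMSV_EVT *)m_NCS_IPC_NON_BLK_RECEIVE(&cb->immnd_mbx, evt))) {
		immnd_evt_dispatch_sketch(cb, evt);
	}
}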
/****************************************************************************
  Name          : ava_hdl_cbk_param_add

  Description   : This routine adds the callback parameters to the pending
		  callback list.

  Arguments     : cb       - ptr to the AvA control block
		  hdl_rec  - ptr to the handle record
		  cbk_info - ptr to the callback parameters

  Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE

  Notes         : This routine reuses the callback info ptr that is received
		  from MDS, thus avoiding an extra copy.
******************************************************************************/
uint32_t ava_hdl_cbk_param_add(AVA_CB *cb, AVA_HDL_REC *hdl_rec, AVSV_AMF_CBK_INFO *cbk_info)
{
	AVA_PEND_CBK_REC *rec = 0;
	uint32_t rc = NCSCC_RC_SUCCESS;
	TRACE_ENTER();

	/* allocate the callbk rec */
	if (!(rec = calloc(1, sizeof(AVA_PEND_CBK_REC)))) {
		rc = NCSCC_RC_FAILURE;
		goto done;
	}

	/* populate the callbk parameters */
	rec->cbk_info = cbk_info;

	/* now push it to the pending list */
	rc = m_NCS_IPC_SEND(&hdl_rec->callbk_mbx, rec, NCS_IPC_PRIORITY_NORMAL);

done:
	if ((NCSCC_RC_SUCCESS != rc) && rec)
		ava_hdl_cbk_rec_del(rec);

	/* only report success when the callback param was actually queued */
	if (NCSCC_RC_SUCCESS == rc)
		TRACE_LEAVE2("Callback param successfully added for handle: %x", hdl_rec->hdl);
	else
		TRACE_LEAVE2("Failed to add callback param for handle: %x", hdl_rec->hdl);
	return rc;
}
static uint32_t mds_quiesced_ack(struct ncsmds_callback_info *mds_info) { SMFSV_EVT *smfsv_evt; if (smfd_cb->is_quiesced_set == true) { /** allocate an SMFSV_EVT **/ if (NULL == (smfsv_evt = calloc(1, sizeof(SMFSV_EVT)))) { LOG_NO("calloc FAILED"); goto err; } /** Initialize the Event **/ smfsv_evt->type = SMFSV_EVT_TYPE_SMFD; smfsv_evt->info.smfd.type = SMFD_EVT_QUIESCED_ACK; smfsv_evt->cb_hdl = (uint32_t)mds_info->i_yr_svc_hdl; /* Push the event and we are done */ if (NCSCC_RC_FAILURE == m_NCS_IPC_SEND(&smfd_cb->mbx, smfsv_evt, NCS_IPC_PRIORITY_VERY_HIGH)) { TRACE("ipc send failed"); smfsv_evt_destroy(smfsv_evt); goto err; } } return NCSCC_RC_SUCCESS; err: return NCSCC_RC_FAILURE; }
/** * Function to add message to msg dist list * * @param buffer dst_pid len * * @return NCSCC_RC_SUCCESS * @return NCSCC_RC_FAILURE * */ uns32 dtm_add_to_msg_dist_list(uns8 *buffer, uns16 len, NODE_ID node_id) { /* Post the event to the mailbox of the inter_thread */ DTM_SND_MSG_ELEM *msg_elem = NULL; TRACE_ENTER(); if (NULL == (msg_elem = calloc(1, sizeof(DTM_SND_MSG_ELEM)))) { return NCSCC_RC_FAILURE; } msg_elem->type = DTM_MBX_DATA_MSG_TYPE; msg_elem->pri = NCS_IPC_PRIORITY_HIGH; msg_elem->info.data.buffer = buffer; msg_elem->info.data.dst_nodeid = node_id; msg_elem->info.data.buff_len = len; if ((m_NCS_IPC_SEND(&dtms_gl_cb->mbx, msg_elem, msg_elem->pri)) != NCSCC_RC_SUCCESS) { /* Message Queuing failed */ free(msg_elem); TRACE("DTM : Internode IPC_SEND : MSG EVENT : FAILED"); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } else { TRACE("DTM : Internode IPC_SEND : MSG EVENT : SUCC"); TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } }
/** * Function to process rcv data message internode * * @param buffer dst_pid len * * @return NCSCC_RC_SUCCESS * @return NCSCC_RC_FAILURE * */ uns32 dtm_internode_process_rcv_data_msg(uns8 *buffer, uns32 dst_pid, uns16 len) { /* Post the event to the mailbox of the intra_thread */ DTM_RCV_MSG_ELEM *dtm_msg_elem = NULL; TRACE_ENTER(); if (NULL == (dtm_msg_elem = calloc(1, sizeof(DTM_RCV_MSG_ELEM)))) { return NCSCC_RC_FAILURE; } dtm_msg_elem->type = DTM_MBX_MSG_TYPE; dtm_msg_elem->pri = NCS_IPC_PRIORITY_HIGH; dtm_msg_elem->info.data.len = len; dtm_msg_elem->info.data.dst_pid = dst_pid; dtm_msg_elem->info.data.buffer = buffer; if ((m_NCS_IPC_SEND(&dtm_intranode_cb->mbx, dtm_msg_elem, dtm_msg_elem->pri)) != NCSCC_RC_SUCCESS) { /* Message Queuing failed */ free(dtm_msg_elem); TRACE("DTM : Intranode IPC_SEND : DATA MSG: FAILED"); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } else { TRACE("DTM : Intranode IPC_SEND : DATA MSG: SUCC"); TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } }
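/**
 * Illustrative sketch (not part of the original code): how the receiving
 * thread could pop the DTM_RCV_MSG_ELEM items queued by the function above.
 * It assumes the standard m_NCS_IPC_NON_BLK_RECEIVE macro; the delivery
 * helper dtm_deliver_to_process_sketch() is hypothetical and only marks
 * where the real DTM code would route the buffer to dst_pid.
 */
/* hypothetical delivery helper, assumed to take ownership of the buffer */
static void dtm_deliver_to_process_sketch(uns32 dst_pid, uns8 *buffer, uns16 len);

static void dtm_intranode_mbx_drain_sketch(void)
{
	DTM_RCV_MSG_ELEM *elem = NULL;

	/* returns NULL once the mailbox is empty */
	while (NULL != (elem = (DTM_RCV_MSG_ELEM *)m_NCS_IPC_NON_BLK_RECEIVE(&dtm_intranode_cb->mbx, elem))) {
		if (elem->type == DTM_MBX_MSG_TYPE)
			dtm_deliver_to_process_sketch(elem->info.data.dst_pid,
						      elem->info.data.buffer,
						      elem->info.data.len);
		free(elem);
	}
}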
/**************************************************************************** * Name : plms_mds_svc_evt * * Description : PLMS is informed when MDS events occur that he has * subscribed to * * Arguments : * cb : PLMS control Block. * enc_info : Svc evt info. * * Return Values : None * * Notes : None. *****************************************************************************/ static SaUint32T plms_mds_svc_evt(MDS_CALLBACK_SVC_EVENT_INFO *svc_evt) { PLMS_EVT *evt = NULL; PLMS_CB *cb = plms_cb; uns32 rc; evt = (PLMS_EVT *)calloc(1, sizeof(PLMS_EVT)); if (!evt) { LOG_ER("PLMS - Evt Alloc Failed"); return NCSCC_RC_OUT_OF_MEM; } /* Service PLMA events at PLMS */ memset(evt, 0, sizeof(PLMS_EVT)); evt->req_res= PLMS_REQ; evt->req_evt.req_type = PLMS_MDS_INFO_EVT_T; evt->req_evt.mds_info.change = svc_evt->i_change; evt->req_evt.mds_info.dest = svc_evt->i_dest; evt->req_evt.mds_info.svc_id = svc_evt->i_svc_id; evt->req_evt.mds_info.node_id = svc_evt->i_node_id; /* Put it in PLMS's Event Queue */ rc = m_NCS_IPC_SEND(&cb->mbx, (NCSCONTEXT)evt, NCS_IPC_PRIORITY_HIGH); if (NCSCC_RC_SUCCESS != rc) { LOG_ER("PLMS - IPC SEND FAILED"); free(evt); } return rc; }
/**************************************************************************** Name : lga_lgs_msg_proc Description : This routine is used to process the ASYNC incoming LGS messages. Arguments : pointer to struct ncsmds_callback_info Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE Notes : None. ******************************************************************************/ static uint32_t lga_lgs_msg_proc(lga_cb_t *cb, lgsv_msg_t *lgsv_msg, MDS_SEND_PRIORITY_TYPE prio) { TRACE_ENTER(); switch (lgsv_msg->type) { case LGSV_LGS_CBK_MSG: switch (lgsv_msg->info.cbk_info.type) { case LGSV_WRITE_LOG_CALLBACK_IND: { lga_client_hdl_rec_t *lga_hdl_rec; TRACE_2("LGSV_LGS_WRITE_LOG_CBK: inv = %d, error = %d", (int)lgsv_msg->info.cbk_info.inv, (int)lgsv_msg->info.cbk_info.write_cbk.error); /** Create the chan hdl record here before ** queing this message onto the priority queue ** so that the dispatch by the application to fetch ** the callback is instantaneous. **/ /** Lookup the hdl rec by client_id **/ if (NULL == (lga_hdl_rec = lga_find_hdl_rec_by_regid(cb, lgsv_msg->info.cbk_info.lgs_client_id))) { TRACE("regid not found"); lga_msg_destroy(lgsv_msg); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } /** enqueue this message **/ if (NCSCC_RC_SUCCESS != m_NCS_IPC_SEND(&lga_hdl_rec->mbx, lgsv_msg, prio)) { TRACE("IPC SEND FAILED"); lga_msg_destroy(lgsv_msg); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } } break; default: TRACE("unknown type %d", lgsv_msg->info.cbk_info.type); lga_msg_destroy(lgsv_msg); TRACE_LEAVE(); return NCSCC_RC_FAILURE; break; } break; default: /** Unexpected message **/ TRACE_2("Unexpected message type: %d", lgsv_msg->type); lga_msg_destroy(lgsv_msg); TRACE_LEAVE(); return NCSCC_RC_FAILURE; break; } TRACE_LEAVE(); return NCSCC_RC_SUCCESS; }
/*************************************************************************** * Name : fm_fill_mds_evt_post_fm_mbx * * Description : Posts an event to mail box. * * Arguments : Control Block, Pointer to event, slot, subslot and Event Code. * * Return Values : NCSCC_RC_FAILURE/NCSCC_RC_SUCCESS * * Notes : None. ***************************************************************************/ static uint32_t fm_fill_mds_evt_post_fm_mbx(FM_CB *cb, FM_EVT *fm_evt, NODE_ID node_id, FM_FSM_EVT_CODE evt_code) { fm_evt->evt_code = evt_code; fm_evt->node_id = node_id; if (m_NCS_IPC_SEND(&cb->mbx, fm_evt, NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) { return NCSCC_RC_FAILURE; } return NCSCC_RC_SUCCESS; }
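/***************************************************************************
 * Illustrative usage sketch (not part of the original code): how a caller in
 * the same file might allocate an FM_EVT with the m_MMGR_ALLOC_FM_EVT macro
 * (also used by fm_tmr_exp later in this listing) and post it through the
 * helper above. FM_EVT_NODE_DOWN is a placeholder event code for this
 * sketch; substitute a real FM_FSM_EVT_CODE value.
 ***************************************************************************/
static uint32_t fm_post_node_down_sketch(FM_CB *cb, NODE_ID node_id)
{
	FM_EVT *fm_evt = m_MMGR_ALLOC_FM_EVT;

	if (fm_evt == NULL)
		return NCSCC_RC_FAILURE;

	memset(fm_evt, 0, sizeof(FM_EVT));

	/* FM_EVT_NODE_DOWN is assumed here for illustration only */
	if (fm_fill_mds_evt_post_fm_mbx(cb, fm_evt, node_id, FM_EVT_NODE_DOWN) != NCSCC_RC_SUCCESS) {
		m_MMGR_FREE_FM_EVT(fm_evt);
		return NCSCC_RC_FAILURE;
	}

	return NCSCC_RC_SUCCESS;
}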
void print_glnd_cb(void)
{
	GLND_CB *cb;
	GLSV_GLND_EVT *glnd_evt;

	/* retrieve the GLND control block before touching it */
	cb = (GLND_CB *)ncshm_take_hdl(NCS_SERVICE_ID_GLND, m_GLND_RETRIEVE_GLND_CB_HDL);
	if (cb == NULL)
		return;

	glnd_evt = m_MMGR_ALLOC_GLND_EVT;
	if (glnd_evt == NULL) {
		ncshm_give_hdl(gl_glnd_hdl);
		return;
	}

	memset(glnd_evt, 0, sizeof(GLSV_GLND_EVT));
	glnd_evt->type = GLSV_GLND_EVT_CB_DUMP;
	glnd_evt->glnd_hdl = cb->cb_hdl_id;

	/* the mailbox send expects an NCS_IPC_PRIORITY_* value, not an MDS send priority */
	m_NCS_IPC_SEND(&cb->glnd_mbx, glnd_evt, NCS_IPC_PRIORITY_NORMAL);

	ncshm_give_hdl(gl_glnd_hdl);
}
/***************************************************************************** PROCEDURE NAME : eds_tmr_exp DESCRIPTION : EDS timer expiry callback routine.It sends corresponding timer events to EDS. ARGUMENTS : uarg - ptr to the EDS timer block RETURNS : void NOTES : None *****************************************************************************/ void eds_tmr_exp(void *uarg) { EDS_CB *eds_cb = 0; EDS_TMR *tmr = (EDS_TMR *)uarg; EDSV_EDS_EVT *evt = 0; uns32 temp_tmr_hdl; temp_tmr_hdl = tmr->cb_hdl; /* retrieve EDS CB */ if (NULL == (eds_cb = (EDS_CB *)ncshm_take_hdl(NCS_SERVICE_ID_EDS, tmr->cb_hdl))) { m_LOG_EDSV_S(EDS_CB_TAKE_HANDLE_FAILED, NCSFL_LC_EDSV_INIT, NCSFL_SEV_ERROR, 0, __FILE__, __LINE__, 0); return; } if (tmr->is_active) { tmr->is_active = FALSE; /* Destroy the timer if it exists.. */ if (tmr->tmr_id != TMR_T_NULL) { m_NCS_TMR_DESTROY(tmr->tmr_id); tmr->tmr_id = TMR_T_NULL; } /* create & send the timer event */ evt = m_MMGR_ALLOC_EDSV_EDS_EVT; if (evt) { memset(evt, '\0', sizeof(EDSV_EDS_EVT)); /* assign the timer evt */ evt->evt_type = eds_tmr_evt_map(tmr->type); evt->info.tmr_info.opq_hdl = tmr->opq_hdl; evt->cb_hdl = tmr->cb_hdl; if (NCSCC_RC_FAILURE == m_NCS_IPC_SEND(&eds_cb->mbx, evt, NCS_IPC_PRIORITY_HIGH)) { m_LOG_EDSV_S(EDS_TIMER_STOP_FAIL, NCSFL_LC_EDSV_INIT, NCSFL_SEV_ERROR, evt->cb_hdl, __FILE__, __LINE__, evt->evt_type); eds_evt_destroy(evt); } } } /* return EDS CB */ ncshm_give_hdl(temp_tmr_hdl); return; }
/***********************************************************************//** * @brief MDS will call this function on receiving PLMA messages. * * @param[in] rcv_info - MDS Receive information. * * @return NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE. ***************************************************************************/ static uint32_t plma_mds_rcv(MDS_CALLBACK_RECEIVE_INFO *rcv_info) { uint32_t rc = NCSCC_RC_SUCCESS; PLMA_ENTITY_GROUP_INFO *grp_info; PLMA_CB *plma_cb = plma_ctrlblk; PLMS_EVT *pEvt = (PLMS_EVT *)rcv_info->i_msg; TRACE_ENTER(); if(pEvt->req_res == PLMS_REQ){ if(pEvt->req_evt.req_type == PLMS_AGENT_TRACK_EVT_T){ if(pEvt->req_evt.agent_track.evt_type == PLMS_AGENT_TRACK_CBK_EVT){ SaPlmEntityGroupHandleT grp_hdl = pEvt->req_evt.agent_track.grp_handle; grp_info = (PLMA_ENTITY_GROUP_INFO *)ncs_patricia_tree_get(&plma_cb->entity_group_info, (uint8_t *)&grp_hdl); if(!grp_info){ /** FIXME : free the evt structure */ return NCSCC_RC_SUCCESS; } if(!grp_info->client_info){ /** FIXME : free the evt structure */ return NCSCC_RC_SUCCESS; } pEvt->sinfo.ctxt = rcv_info->i_msg_ctxt; pEvt->sinfo.dest = rcv_info->i_fr_dest; pEvt->sinfo.to_svc = rcv_info->i_fr_svc_id; if (rcv_info->i_rsp_reqd) { pEvt->sinfo.stype = MDS_SENDTYPE_RSP; } /* Put it in PLMA's Event Queue */ rc = m_NCS_IPC_SEND(&grp_info->client_info->callbk_mbx, (NCSCONTEXT)pEvt, NCS_IPC_PRIORITY_NORMAL); if (NCSCC_RC_SUCCESS != rc) { LOG_ER("PLMA - IPC SEND FAILED"); } } } } TRACE_LEAVE(); return rc; }
/***************************************************************************** PROCEDURE NAME : eds_tmr_exp DESCRIPTION : EDS timer expiry callback routine.It sends corresponding timer events to EDS. ARGUMENTS : uarg - ptr to the EDS timer block RETURNS : void NOTES : None *****************************************************************************/ void eds_tmr_exp(void *uarg) { EDS_CB *eds_cb = 0; EDS_TMR *tmr = (EDS_TMR *)uarg; EDSV_EDS_EVT *evt = 0; uint32_t temp_tmr_hdl; temp_tmr_hdl = tmr->cb_hdl; /* retrieve EDS CB */ if (NULL == (eds_cb = (EDS_CB *)ncshm_take_hdl(NCS_SERVICE_ID_EDS, tmr->cb_hdl))) { LOG_ER("Global take handle failed"); return; } if (tmr->is_active) { tmr->is_active = false; /* Destroy the timer if it exists.. */ if (tmr->tmr_id != TMR_T_NULL) { m_NCS_TMR_DESTROY(tmr->tmr_id); tmr->tmr_id = TMR_T_NULL; } /* create & send the timer event */ evt = m_MMGR_ALLOC_EDSV_EDS_EVT; if (evt) { memset(evt, '\0', sizeof(EDSV_EDS_EVT)); /* assign the timer evt */ evt->evt_type = eds_tmr_evt_map(tmr->type); evt->info.tmr_info.opq_hdl = tmr->opq_hdl; evt->cb_hdl = tmr->cb_hdl; if (NCSCC_RC_FAILURE == m_NCS_IPC_SEND(&eds_cb->mbx, evt, NCS_IPC_PRIORITY_HIGH)) { LOG_ER("IPC send failed for timer event"); eds_evt_destroy(evt); } } } /* return EDS CB */ ncshm_give_hdl(temp_tmr_hdl); return; }
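/*****************************************************************************
  Illustrative sketch (not part of the original code): one way the EDS_TMR
  consumed by eds_tmr_exp() above could be armed. It assumes the LEAP timer
  macros m_NCS_TMR_CREATE/m_NCS_TMR_START take (tmr_id, period, callback,
  argument); the function name and the period parameter are illustrative
  only, and field names follow the EDS_TMR usage in eds_tmr_exp().
*****************************************************************************/
static uint32_t eds_tmr_start_sketch(EDS_TMR *tmr, uint32_t period)
{
	/* create the timer on first use */
	if (tmr->tmr_id == TMR_T_NULL)
		m_NCS_TMR_CREATE(tmr->tmr_id, period, eds_tmr_exp, (void *)tmr);

	/* start it only if it is not already running */
	if (tmr->is_active == false) {
		m_NCS_TMR_START(tmr->tmr_id, period, eds_tmr_exp, (void *)tmr);
		tmr->is_active = true;
	}

	return NCSCC_RC_SUCCESS;
}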
/***************************************************************************** PROCEDURE NAME : gld_tmr_exp DESCRIPTION : GLD timer expiry callback routine.It sends corresponding timer events to GLD. ARGUMENTS : uarg - ptr to the GLD timer block RETURNS : void NOTES : None *****************************************************************************/ void gld_tmr_exp(void *uarg) { GLSV_GLD_CB *cb = 0; GLD_TMR *tmr = (GLD_TMR *)uarg; GLSV_GLD_EVT *evt = 0; uns32 cb_hdl; cb_hdl = tmr->cb_hdl; /* retrieve GLD CB */ cb = (GLSV_GLD_CB *)ncshm_take_hdl(NCS_SERVICE_ID_GLD, tmr->cb_hdl); if (!cb) { m_LOG_GLD_HEADLINE(GLD_TAKE_HANDLE_FAILED, NCSFL_SEV_ERROR, __FILE__, __LINE__, 0); return; } tmr->is_active = FALSE; /* create & send the timer event */ evt = m_MMGR_ALLOC_GLSV_GLD_EVT; if (evt == GLSV_GLD_EVT_NULL) { m_LOG_GLD_MEMFAIL(GLD_EVT_ALLOC_FAILED, __FILE__, __LINE__); ncshm_give_hdl(cb_hdl); return; } memset(evt, 0, sizeof(GLSV_GLD_EVT)); if (evt) { /* assign the timer evt */ evt->evt_type = gld_tmr_evt_map(tmr->type); evt->info.tmr.opq_hdl = tmr->opq_hdl; evt->info.tmr.resource_id = tmr->resource_id; memcpy(&evt->info.tmr.mdest_id, &tmr->mdest_id, sizeof(MDS_DEST)); evt->gld_cb = cb; /* Push the event and we are done */ if (m_NCS_IPC_SEND(&cb->mbx, evt, NCS_IPC_PRIORITY_NORMAL) == NCSCC_RC_FAILURE) { m_LOG_GLD_HEADLINE(GLD_IPC_SEND_FAIL, NCSFL_SEV_ERROR, __FILE__, __LINE__, 0); gld_evt_destroy(evt); ncshm_give_hdl(cb_hdl); return; } } /* return GLD CB */ ncshm_give_hdl(cb_hdl); return; }
static uint32_t mds_svc_event(struct ncsmds_callback_info *info) { uint32_t rc = NCSCC_RC_SUCCESS; SMFSV_EVT *evt = NULL; MDS_CALLBACK_SVC_EVENT_INFO *svc_evt = &info->info.svc_evt; /* First make sure that this event is indeed for us */ if (info->info.svc_evt.i_your_id != NCSMDS_SVC_ID_SMFD) { TRACE("event not NCSMDS_SVC_ID_SMFD"); rc = NCSCC_RC_FAILURE; goto done; } /* If this evt was sent from SMFND act on this */ if (info->info.svc_evt.i_svc_id == NCSMDS_SVC_ID_SMFND) { /** allocate an SMFSV_EVENT **/ if (NULL == (evt = calloc(1, sizeof(SMFSV_EVT)))) { LOG_ER("calloc FAILED"); rc = NCSCC_RC_FAILURE; goto done; } /* Send the SMFD_EVT_MDS_INFO to the mailbox */ evt->type = SMFSV_EVT_TYPE_SMFD; evt->info.smfd.type = SMFD_EVT_MDS_INFO; evt->info.smfd.event.mds_info.change = svc_evt->i_change; evt->info.smfd.event.mds_info.dest = svc_evt->i_dest; evt->info.smfd.event.mds_info.svc_id = svc_evt->i_svc_id; evt->info.smfd.event.mds_info.node_id = svc_evt->i_node_id; evt->info.smfd.event.mds_info.rem_svc_pvt_ver = svc_evt->i_rem_svc_pvt_ver; TRACE("SMFND SVC event %d for nodeid %x, svc version %u", svc_evt->i_change, svc_evt->i_node_id, svc_evt->i_rem_svc_pvt_ver); /* Put it in SMFD's Event Queue */ rc = m_NCS_IPC_SEND(&smfd_cb->mbx, (NCSCONTEXT) evt, NCS_IPC_PRIORITY_HIGH); if (NCSCC_RC_SUCCESS != rc) { free(evt); rc = NCSCC_RC_FAILURE; goto done; } } done: return rc; }
/****************************************************************************
 * Name          : mqd_timer_expiry
 *
 * Description   : This function, which is registered with the OS timer
 *                 service, posts a message to the corresponding mailbox
 *                 depending on the component type.
 *
 *****************************************************************************/
void mqd_timer_expiry(NCSCONTEXT uarg)
{
	MQD_TMR *tmr = (MQD_TMR *)uarg;
	NCS_IPC_PRIORITY priority = NCS_IPC_PRIORITY_HIGH;
	MQD_CB *cb;
	MQSV_EVT *evt = 0;
	uns32 mqd_hdl;

	if (tmr != NULL) {
		mqd_hdl = tmr->uarg;

		if (tmr->is_active)
			tmr->is_active = FALSE;

		/* Destroy the timer if it exists.. */
		if (tmr->tmr_id != TMR_T_NULL) {
			m_NCS_TMR_DESTROY(tmr->tmr_id);
			tmr->tmr_id = TMR_T_NULL;
		}

		/* post a message to the corresponding component */
		if ((cb = (MQD_CB *)ncshm_take_hdl(NCS_SERVICE_ID_MQD, mqd_hdl)) != NULL) {
			evt = m_MMGR_ALLOC_MQSV_EVT(NCS_SERVICE_ID_MQD);
			if (evt == NULL) {
				m_LOG_MQSV_D(MQD_MEMORY_ALLOC_FAIL, NCSFL_LC_TIMER, NCSFL_SEV_ERROR, 0,
					     __FILE__, __LINE__);
				/* give back the handle taken above before bailing out */
				ncshm_give_hdl(mqd_hdl);
				return;
			}
			memset(evt, 0, sizeof(MQSV_EVT));
			evt->type = MQSV_EVT_MQD_CTRL;
			evt->msg.mqd_ctrl.type = MQD_MSG_TMR_EXPIRY;
			evt->msg.mqd_ctrl.info.tmr_info.nodeid = tmr->nodeid;
			evt->msg.mqd_ctrl.info.tmr_info.type = tmr->type;

			/* Post the event to MQD Thread */
			m_NCS_IPC_SEND(&cb->mbx, evt, priority);
			ncshm_give_hdl(mqd_hdl);
		}
	}
	return;
}
/****************************************************************************
 * Name          : fm_tmr_exp
 *
 * Description   : Timer expiry function.
 *
 * Arguments     : Pointer to timer data structure
 *
 * Return Values : None.
 *
 * Notes         : None.
 *****************************************************************************/
void fm_tmr_exp(void *fm_tmr)
{
	FM_CB *fm_cb = NULL;
	FM_TMR *tmr = (FM_TMR *)fm_tmr;
	FM_EVT *evt = NULL;

	if (tmr == NULL) {
		return;
	}

	/* Take handle */
	fm_cb = ncshm_take_hdl(NCS_SERVICE_ID_GFM, gl_fm_hdl);

	if (fm_cb == NULL) {
		syslog(LOG_ERR, "Taking handle failed in timer expiry ");
		return;
	}

	if (FM_TMR_STOPPED == tmr->status) {
		/* give back the handle taken above before returning */
		ncshm_give_hdl(gl_fm_hdl);
		return;
	}
	tmr->status = FM_TMR_STOPPED;

	/* Create & send the timer event to the FM MBX. */
	evt = m_MMGR_ALLOC_FM_EVT;

	if (evt != NULL) {
		memset(evt, '\0', sizeof(FM_EVT));
		evt->evt_code = FM_EVT_TMR_EXP;
		evt->info.fm_tmr = tmr;

		if (m_NCS_IPC_SEND(&fm_cb->mbx, evt, NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) {
			syslog(LOG_ERR, "IPC send failed in timer expiry ");
			m_MMGR_FREE_FM_EVT(evt);
		}
	}

	/* Give handle */
	ncshm_give_hdl(gl_fm_hdl);
	fm_cb = NULL;

	return;
}
/****************************************************************************
  Name          : glsv_gla_callback_queue_write

  Description   : This routine is used to queue the callbacks to the client
		  by the MDS.

  Arguments     : gla_cb    - pointer to the gla control block
		  handle    - handle id of the client
		  clbk_info - pointer to the callback information

  Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE

  Notes         : None
******************************************************************************/
uns32 glsv_gla_callback_queue_write(GLA_CB *gla_cb, SaLckHandleT handle, GLSV_GLA_CALLBACK_INFO *clbk_info)
{
	GLA_CLIENT_INFO *client_info = NULL;
	uns32 rc = NCSCC_RC_FAILURE;

	m_NCS_LOCK(&gla_cb->cb_lock, NCS_LOCK_READ);

	/* Search for the node from the client tree */
	client_info = (GLA_CLIENT_INFO *)ncs_patricia_tree_get(&gla_cb->gla_client_tree, (uns8 *)&handle);
	if (client_info == NULL) {
		/* received a callback for a non-existent client; release the
		 * lock taken above and return failure */
		m_NCS_UNLOCK(&gla_cb->cb_lock, NCS_LOCK_READ);
		return rc;
	} else {
		rc = m_NCS_IPC_SEND(&client_info->callbk_mbx, clbk_info, NCS_IPC_PRIORITY_NORMAL);
	}

	m_NCS_UNLOCK(&gla_cb->cb_lock, NCS_LOCK_READ);
	return rc;
}
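/****************************************************************************
  Illustrative sketch (not part of the original code): the matching read side
  of the per-client callback mailbox written to above, as the dispatch path
  might pop entries one at a time. The function name is hypothetical; the
  non-blocking receive macro is the standard LEAP one, and a NULL return is
  assumed to mean the mailbox is empty.
******************************************************************************/
static GLSV_GLA_CALLBACK_INFO *glsv_gla_callback_queue_read_sketch(GLA_CLIENT_INFO *client_info)
{
	/* returns NULL when no callback is pending */
	return (GLSV_GLA_CALLBACK_INFO *)m_NCS_IPC_NON_BLK_RECEIVE(&client_info->callbk_mbx, NULL);
}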
/**************************************************************************** * Name : cpd_timer_expiry * * Description : This function which is registered with the OS tmr function, * which will post a message to the corresponding mailbox * depending on the component type. * *****************************************************************************/ void cpd_timer_expiry(NCSCONTEXT uarg) { /* uint32_t hdl = (uint32_t)uarg; CPD_TMR *tmr = NULL; */ CPD_TMR *tmr = (CPD_TMR *)uarg; NCS_IPC_PRIORITY priority = NCS_IPC_PRIORITY_HIGH; CPD_CB *cb; CPSV_EVT *evt = NULL; uint32_t cpd_hdl = m_CPD_GET_CB_HDL; /* post a message to the corresponding component */ if ((cb = (CPD_CB *)ncshm_take_hdl(NCS_SERVICE_ID_CPD, cpd_hdl)) == NULL) return; /* tmr = (CPD_TMR *)ncshm_take_hdl(NCS_SERVICE_ID_CPD, hdl); */ if (tmr) { evt = m_MMGR_ALLOC_CPSV_EVT(NCS_SERVICE_ID_CPD); if (evt) { evt->type = CPSV_EVT_TYPE_CPD; evt->info.cpd.type = CPD_EVT_TIME_OUT; switch (tmr->type) { case CPD_TMR_TYPE_CPND_RETENTION: evt->info.cpd.info.tmr_info.type = CPD_TMR_TYPE_CPND_RETENTION; evt->info.cpd.info.tmr_info.info.cpnd_dest = tmr->info.cpnd_dest; break; default: break; } /* ncshm_give_hdl(hdl); */ /* Post the event to CPD Thread */ m_NCS_IPC_SEND(&cb->cpd_mbx, evt, priority); } ncshm_give_hdl(cpd_hdl); } return; }
static uns32 mqnd_mds_rcv(MQND_CB *pMqnd, MDS_CALLBACK_RECEIVE_INFO *rcv_info) { uns32 rc = NCSCC_RC_SUCCESS; MQSV_EVT *pEvt = (MQSV_EVT *)rcv_info->i_msg; pEvt->sinfo.ctxt = rcv_info->i_msg_ctxt; pEvt->sinfo.dest = rcv_info->i_fr_dest; pEvt->sinfo.to_svc = rcv_info->i_fr_svc_id; if (rcv_info->i_rsp_reqd) { pEvt->sinfo.stype = MDS_SENDTYPE_RSP; } /* Put it in MQND's Event Queue */ rc = m_NCS_IPC_SEND(&pMqnd->mbx, (NCSCONTEXT)pEvt, NCS_IPC_PRIORITY_NORMAL); if (NCSCC_RC_SUCCESS != rc) { m_LOG_MQSV_ND(MQND_MDS_SND_TO_MAILBOX_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, rc, __FILE__, __LINE__); } return rc; }
static uint32_t mds_rcv(struct ncsmds_callback_info *mds_info) { SMFSV_EVT *smfsv_evt = (SMFSV_EVT *) mds_info->info.receive.i_msg; uint32_t rc = NCSCC_RC_SUCCESS; smfsv_evt->cb_hdl = (uint32_t) mds_info->i_yr_svc_hdl; smfsv_evt->fr_node_id = mds_info->info.receive.i_node_id; smfsv_evt->fr_dest = mds_info->info.receive.i_fr_dest; smfsv_evt->fr_svc = mds_info->info.receive.i_fr_svc_id; smfsv_evt->rcvd_prio = mds_info->info.receive.i_priority; smfsv_evt->mds_ctxt = mds_info->info.receive.i_msg_ctxt; /* Send the event to our mailbox */ rc = m_NCS_IPC_SEND(&smfd_cb->mbx, smfsv_evt, mds_info->info.receive.i_priority); if (rc != NCSCC_RC_SUCCESS) { LOG_ER("IPC send failed %d", rc); } return rc; }
/**************************************************************************** * Name : plms_mds_rcv * * Description : MDS will call this function on receiving PLMS messages. * * Arguments : rcv_info - MDS Receive information. * * Return Values : NCSCC_RC_SUCCESS/Error Code. * * Notes : None. ****************************************************************************/ static SaUint32T plms_mds_rcv(MDS_CALLBACK_RECEIVE_INFO *rcv_info) { PLMS_CB * cb = plms_cb; uns32 rc = NCSCC_RC_SUCCESS; PLMS_EVT *pEvt = (PLMS_EVT *)rcv_info->i_msg; pEvt->sinfo.ctxt = rcv_info->i_msg_ctxt; pEvt->sinfo.dest = rcv_info->i_fr_dest; pEvt->sinfo.to_svc = rcv_info->i_fr_svc_id; if (rcv_info->i_rsp_reqd) { pEvt->sinfo.stype = MDS_SENDTYPE_RSP; } /* Put it in PLMS's Event Queue */ rc = m_NCS_IPC_SEND(&cb->mbx, (NCSCONTEXT)pEvt, NCS_IPC_PRIORITY_NORMAL); if (NCSCC_RC_SUCCESS != rc) { LOG_ER("PLMS - IPC SEND FAILED"); } return rc; }
static uint32_t immnd_mds_svc_evt(IMMND_CB *cb, MDS_CALLBACK_SVC_EVENT_INFO *svc_evt) { IMMSV_EVT *evt; uint32_t rc = NCSCC_RC_SUCCESS, priority = NCS_IPC_PRIORITY_HIGH; TRACE_ENTER(); if (svc_evt->i_svc_id == NCSMDS_SVC_ID_IMMD) { m_NCS_LOCK(&cb->immnd_immd_up_lock, NCS_LOCK_WRITE); switch (svc_evt->i_change) { case NCSMDS_DOWN: TRACE("IMMD SERVICE DOWN => CLUSTER GOING DOWN"); cb->fevs_replies_pending = 0; break; case NCSMDS_UP: TRACE("NCSMDS_UP for IMMD. cb->is_immd_up = true; (v)dest:%llu", (long long unsigned int) svc_evt->i_dest); cb->is_immd_up = true; cb->immd_mdest_id = svc_evt->i_dest; break; case NCSMDS_NO_ACTIVE: /* Do NOT set cb->is_immd_up to false, messages to IMMD vdest buffered */ if (cb->fevs_replies_pending) { LOG_WA("Director Service in NOACTIVE state - " "fevs replies pending:%u fevs highest processed:%llu", cb->fevs_replies_pending, cb->highestProcessed); TRACE("Resetting fevs replies pending to zero"); cb->fevs_replies_pending = 0; } else { TRACE("Director Service in NOACTIVE state"); } break; case NCSMDS_NEW_ACTIVE: TRACE("NCSMDS_NEW_ACTIVE IMMD"); cb->immd_mdest_id = svc_evt->i_dest; break; case NCSMDS_RED_UP: LOG_ER("NCSMDS_RED_UP: SHOULD NOT HAPPEN"); break; case NCSMDS_RED_DOWN: LOG_ER("NCSMDS_RED_DOWN: SHOULD NOT HAPPEN"); break; case NCSMDS_CHG_ROLE: LOG_ER("NCSMDS_CHG_ROLE: SHOULD NOT HAPPEN"); break; default: break; } priority = NCS_IPC_PRIORITY_VERY_HIGH; m_NCS_UNLOCK(&cb->immnd_immd_up_lock, NCS_LOCK_WRITE); } /* IMMA events from other nodes can not happen */ if ((svc_evt->i_svc_id == NCSMDS_SVC_ID_IMMA_OM) || (svc_evt->i_svc_id == NCSMDS_SVC_ID_IMMA_OI)) osafassert(m_NCS_NODE_ID_FROM_MDS_DEST(cb->immnd_mdest_id) == m_NCS_NODE_ID_FROM_MDS_DEST(svc_evt->i_dest)); /* Send the IMMND_EVT_MDS_INFO to IMMND */ evt = calloc(1, sizeof(IMMSV_EVT)); if (evt == NULL) { LOG_WA("calloc failed"); return NCSCC_RC_FAILURE; } evt->type = IMMSV_EVT_TYPE_IMMND; evt->info.immnd.type = IMMND_EVT_MDS_INFO; evt->info.immnd.info.mds_info.change = svc_evt->i_change; evt->info.immnd.info.mds_info.dest = svc_evt->i_dest; evt->info.immnd.info.mds_info.svc_id = svc_evt->i_svc_id; evt->info.immnd.info.mds_info.role = svc_evt->i_role; /* Put it in IMMND's Event Queue */ rc = m_NCS_IPC_SEND(&cb->immnd_mbx, (NCSCONTEXT)evt, priority); if (rc != NCSCC_RC_SUCCESS) { LOG_WA("NCS IPC Send Failed"); } TRACE_LEAVE(); return rc; }
/***************************************************************************
@brief		: Match the filter. If it matches, post the evt to the client
		  MBX and increment the cbk count of the corresponding hdl
		  node. If more than one scope matches for a client, that
		  many evts are posted to the MBX.
@param[in]	: client_info - For which filter match to be performed.
@param[in]	: cbk_evt - Evt received from SMFND.
@return		: NCSCC_RC_FAILURE/NCSCC_RC_SUCCESS.
***************************************************************************/
uint32_t smfa_cbk_filter_match(SMFA_CLIENT_INFO *client_info, SMF_CBK_EVT *cbk_evt)
{
	SMFA_SCOPE_INFO *scope_info = client_info->scope_info_list;
	uint32_t rc = NCSCC_RC_FAILURE;
	uint32_t no_of_filters;
	SMF_EVT *evt;
	SMFA_CBK_HDL_LIST *hdl_list = NULL;

	while (scope_info) {
		for (no_of_filters = 0; no_of_filters < scope_info->scope_of_interest.filtersNumber;
		     no_of_filters++) {
			if (NCSCC_RC_SUCCESS == smfa_cbk_filter_type_match(
						    &scope_info->scope_of_interest.filters[no_of_filters],
						    &cbk_evt->cbk_label)) {
				/* Post to the application MBX. */
				evt = (SMF_EVT *)calloc(1, sizeof(SMF_EVT));
				if (NULL == evt) {
					LOG_ER("SMFA: calloc FAILED, error: %s", strerror(errno));
					osafassert(0);
				}
				evt->evt.cbk_evt.cbk_label.label =
				    (SaUint8T *)calloc(1, cbk_evt->cbk_label.labelSize * sizeof(SaUint8T));
				if (NULL == evt->evt.cbk_evt.cbk_label.label) {
					LOG_ER("SMFA: calloc FAILED, error: %s", strerror(errno));
					osafassert(0);
				}
				if (cbk_evt->params) {
					evt->evt.cbk_evt.params = (SaStringT)calloc(1, cbk_evt->params_len + 1);
					if (NULL == evt->evt.cbk_evt.params) {
						LOG_ER("SMFA: calloc FAILED, error: %s", strerror(errno));
						osafassert(0);
					}
					strncpy(evt->evt.cbk_evt.params, cbk_evt->params, cbk_evt->params_len);
					/* calloc above has set the null char at the end of params */
				}
				evt->evt.cbk_evt.cbk_label.labelSize = cbk_evt->cbk_label.labelSize;
				strncpy((char *)evt->evt.cbk_evt.cbk_label.label, (char *)cbk_evt->cbk_label.label,
					cbk_evt->cbk_label.labelSize);
				evt->evt_type = SMF_CLBK_EVT;
				evt->evt.cbk_evt.inv_id = cbk_evt->inv_id;
				evt->evt.cbk_evt.scope_id = scope_info->scope_id;
				evt->evt.cbk_evt.camp_phase = cbk_evt->camp_phase;
				osaf_extended_name_alloc(osaf_extended_name_borrow(&cbk_evt->object_name),
							 &evt->evt.cbk_evt.object_name);

				/* compare against NCSCC_RC_SUCCESS explicitly; the return
				 * value is non-zero for both success and failure */
				if (NCSCC_RC_SUCCESS ==
				    m_NCS_IPC_SEND(&client_info->cbk_mbx, (NCSCONTEXT)evt, NCS_IPC_PRIORITY_NORMAL)) {
					/* Increment the cbk count. */
					if (NULL != hdl_list) {
						/* A second scope id matched for the same hdl. */
					} else {
						/* First scope id matching for this hdl. */
						hdl_list = smfa_inv_hdl_add(cbk_evt->inv_id, client_info->client_hdl);
					}
					hdl_list->cnt++;
					rc = NCSCC_RC_SUCCESS;
				} else {
					LOG_ER("SMFA: Posting to MBX failed. hdl: %llu, scope_id: %u",
					       client_info->client_hdl, cbk_evt->scope_id);
				}
				/* If one of the filters matches, go to the next scope id. */
				break;
			}
		}
		scope_info = scope_info->next_scope;
	}
	return rc;
}
void proc_callback_rsp(smfd_cb_t *cb, SMFSV_EVT *evt) { SMF_EVT *cbk_rsp = &evt->info.smfd.event.cbk_rsp; uns32 rc = NCSCC_RC_SUCCESS; SMFSV_EVT *new_evt; TRACE_ENTER(); if (cbk_rsp->evt_type == SMF_CLBK_EVT) { /* Ignore, log and return, SMF-D should not receive this */ TRACE_LEAVE2("Received SMF_CLBK_EVT, which should not be the case"); return; } else if (cbk_rsp->evt_type == SMF_RSP_EVT) { SMFD_SMFND_ADEST_INVID_MAP *prev=NULL, *temp = smfd_cb->smfnd_list; TRACE_2("Received evt_type: %d, inv_id: %llu, err: %d", cbk_rsp->evt_type, cbk_rsp->evt.resp_evt.inv_id, cbk_rsp->evt.resp_evt.err); while (temp != NULL) { if (temp->inv_id == cbk_rsp->evt.resp_evt.inv_id) { /* check the response */ TRACE_2("found the node with inv_id: %llu", cbk_rsp->evt.resp_evt.inv_id); if (cbk_rsp->evt.resp_evt.err == SA_AIS_ERR_FAILED_OPERATION) { temp->no_of_cbks = 0; } else if (cbk_rsp->evt.resp_evt.err == SA_AIS_OK) { temp->no_of_cbks--; } if (temp->no_of_cbks == 0) { /* all responses are received for this inv_id */ TRACE("last response received, cleaning up the node"); if (prev == NULL) { smfd_cb->smfnd_list = smfd_cb->smfnd_list->next_invid; } else { prev->next_invid = temp->next_invid; } #if 0 /* send the consolidated response to camp/proc thread */ if (temp->proc == NULL) { /* callback was invoked from campaign thread */ SmfCampaignThread *camp_thread = SmfCampaignThread::instance(); rc = m_NCS_IPC_SEND(&camp_thread->m_resp_mbx, cbk_rsp->resp_evt, NCS_IPC_PRIORITY_HIGH); if (rc != NCSCC_RC_SUCCESS) { LOG_CR("IPC send failed %d, %s", rc, strerror(errno)); } } else { /* callback invoked from procedure thread */ rc = m_NCS_IPC_SEND(&temp->proc->m_resp_mbx, cbk_rsp->resp_evt, NCS_IPC_PRIORITY_HIGH); if (rc != NCSCC_RC_SUCCESS) { LOG_CR("IPC send failed %d, %s", rc, strerror(errno)); } } #endif new_evt = (SMFSV_EVT *)calloc (1, sizeof(SMFSV_EVT)); memcpy (new_evt, evt, sizeof(SMFSV_EVT)); rc = m_NCS_IPC_SEND(temp->cbk_mbx, new_evt, NCS_IPC_PRIORITY_HIGH); if (rc != NCSCC_RC_SUCCESS) { LOG_CR("IPC send failed %d, %s", rc, strerror(errno)); } free(temp); break; /* from the while, otherwise return from function */ } } prev = temp; temp = temp->next_invid; } } TRACE_LEAVE(); return; }
/**************************************************************************** * Name : mqnd_mds_direct_rcv * * Description : MDS will call this function on receiving MQND/ASAPi messages. * * Arguments : cb - MQND Control Block * direct_rcv_info - MDS Direct Receive information. * * Return Values : NCSCC_RC_SUCCESS/Error Code. * * Notes : None. *****************************************************************************/ static uns32 mqnd_mds_direct_rcv(MQND_CB *pMqnd, MDS_CALLBACK_DIRECT_RECEIVE_INFO *direct_rcv_info) { uns32 rc = NCSCC_RC_SUCCESS, is_valid_msg_fmt; MQSV_DSEND_EVT *pEvt = (MQSV_DSEND_EVT *)direct_rcv_info->i_direct_buff; NCS_BOOL endianness = machineEndianness(); is_valid_msg_fmt = m_NCS_MSG_FORMAT_IS_VALID(direct_rcv_info->i_msg_fmt_ver, MQND_WRT_MQA_SUBPART_VER_AT_MIN_MSG_FMT, MQND_WRT_MQA_SUBPART_VER_AT_MAX_MSG_FMT, mqnd_mqa_msg_fmt_table); if (!is_valid_msg_fmt || (direct_rcv_info->i_msg_fmt_ver == 1)) { /* Drop The Message */ m_LOG_MQSV_ND(MQND_MSG_FRMT_VER_INVALID, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, is_valid_msg_fmt, __FILE__, __LINE__); TRACE("mqnd_mds_direct_rcv:INVALID MSG FORMAT %d", is_valid_msg_fmt); return NCSCC_RC_FAILURE; } pEvt->sinfo.ctxt = direct_rcv_info->i_msg_ctxt; pEvt->sinfo.dest = direct_rcv_info->i_fr_dest; pEvt->sinfo.to_svc = direct_rcv_info->i_fr_svc_id; if (direct_rcv_info->i_rsp_reqd) { pEvt->sinfo.stype = MDS_SENDTYPE_RSP; } m_LOG_MQSV_ND(MQND_MDS_SNDDIRECT_RCV, NCSFL_LC_MQSV_INIT, NCSFL_SEV_INFO, endianness, __FILE__, __LINE__); /* If the endianess of the source is different, decode to host order */ if (pEvt->endianness != endianness) { pEvt->type.raw = m_MQSV_REVERSE_ENDIAN_L(&pEvt->type, endianness); switch (pEvt->type.req_type) { case MQP_EVT_SEND_MSG: { pEvt->agent_mds_dest = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->agent_mds_dest, endianness); pEvt->info.snd_msg.msgHandle = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.msgHandle, endianness); pEvt->info.snd_msg.queueHandle = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.queueHandle, endianness); pEvt->info.snd_msg.destination.length = m_MQSV_REVERSE_ENDIAN_S(&pEvt->info.snd_msg.destination.length, endianness); pEvt->info.snd_msg.ackFlags = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.snd_msg.ackFlags, endianness); pEvt->info.snd_msg.messageInfo.sendTime = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.messageInfo.sendTime, endianness); pEvt->info.snd_msg.messageInfo.sendReceive = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.snd_msg.messageInfo.sendReceive, endianness); if (pEvt->info.snd_msg.messageInfo.sendReceive == SA_FALSE) { pEvt->info.snd_msg.messageInfo.sender.senderId = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.messageInfo.sender.senderId, endianness); } else { pEvt->info.snd_msg.messageInfo.sender.sender_context.sender_dest = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.messageInfo.sender. sender_context.sender_dest, endianness); pEvt->info.snd_msg.messageInfo.sender.sender_context.reply_buffer_size = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.messageInfo.sender. 
sender_context.reply_buffer_size, endianness); } pEvt->info.snd_msg.message.type = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.snd_msg.message.type, endianness); pEvt->info.snd_msg.message.version = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.snd_msg.message.version, endianness); pEvt->info.snd_msg.message.size = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.snd_msg.message.size, endianness); pEvt->info.snd_msg.message.senderName.length = m_MQSV_REVERSE_ENDIAN_S(&pEvt->info.snd_msg.message.senderName.length, endianness); } break; case MQP_EVT_SEND_MSG_ASYNC: { pEvt->agent_mds_dest = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->agent_mds_dest, endianness); pEvt->info.sndMsgAsync.SendMsg.msgHandle = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.sndMsgAsync.SendMsg.msgHandle, endianness); pEvt->info.sndMsgAsync.SendMsg.queueHandle = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.sndMsgAsync.SendMsg.queueHandle, endianness); pEvt->info.sndMsgAsync.SendMsg.destination.length = m_MQSV_REVERSE_ENDIAN_S(&pEvt->info.sndMsgAsync.SendMsg.destination.length, endianness); pEvt->info.sndMsgAsync.SendMsg.ackFlags = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.sndMsgAsync.SendMsg.ackFlags, endianness); pEvt->info.sndMsgAsync.SendMsg.messageInfo.sendTime = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.sndMsgAsync.SendMsg.messageInfo.sendTime, endianness); pEvt->info.sndMsgAsync.SendMsg.messageInfo.sendReceive = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.sndMsgAsync.SendMsg.messageInfo.sendReceive, endianness); pEvt->info.sndMsgAsync.SendMsg.messageInfo.sender.senderId = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.sndMsgAsync.SendMsg.messageInfo.sender. senderId, endianness); pEvt->info.sndMsgAsync.SendMsg.message.type = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.sndMsgAsync.SendMsg.message.type, endianness); pEvt->info.sndMsgAsync.SendMsg.message.version = m_MQSV_REVERSE_ENDIAN_L(&pEvt->info.sndMsgAsync.SendMsg.message.version, endianness); pEvt->info.sndMsgAsync.SendMsg.message.size = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.sndMsgAsync.SendMsg.message.size, endianness); pEvt->info.sndMsgAsync.SendMsg.message.senderName.length = m_MQSV_REVERSE_ENDIAN_S(&pEvt->info.sndMsgAsync.SendMsg.message.senderName.length, endianness); pEvt->info.sndMsgAsync.invocation = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.sndMsgAsync.invocation, endianness); } break; case MQP_EVT_STAT_UPD_REQ: { pEvt->info.statsReq.qhdl = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.statsReq.qhdl, endianness); pEvt->info.statsReq.size = m_MQSV_REVERSE_ENDIAN_LL(&pEvt->info.statsReq.size, endianness); } break; default: return NCSCC_RC_FAILURE; } } /* Put it in MQND's Event Queue */ rc = m_NCS_IPC_SEND(&pMqnd->mbx, (NCSCONTEXT)pEvt, NCS_IPC_PRIORITY_NORMAL); if (NCSCC_RC_SUCCESS != rc) { m_LOG_MQSV_ND(MQND_MDS_SND_TO_MAILBOX_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, rc, __FILE__, __LINE__); } return rc; }
static uns32 mqnd_mds_svc_evt(MQND_CB *cb, MDS_CALLBACK_SVC_EVENT_INFO *svc_evt) { uns32 rc = NCSCC_RC_SUCCESS, to_dest_slotid, o_msg_fmt_ver; switch (svc_evt->i_change) { case NCSMDS_DOWN: if (svc_evt->i_svc_id == NCSMDS_SVC_ID_MQD) { if (cb->is_mqd_up == TRUE) { /* If MQD is already UP */ cb->is_mqd_up = FALSE; m_LOG_MQSV_ND(MQND_MQD_SERVICE_WENT_DOWN, NCSFL_LC_MQSV_INIT, NCSFL_SEV_NOTICE, rc, __FILE__, __LINE__); return NCSCC_RC_SUCCESS; } } else if (svc_evt->i_svc_id == NCSMDS_SVC_ID_MQA) { MQSV_EVT *evt = NULL; /* Post the event to Clean all the Queues opened by applications on this agent */ evt = m_MMGR_ALLOC_MQSV_EVT(NCS_SERVICE_ID_MQND); if (evt == NULL) { m_LOG_MQSV_ND(MQND_EVT_ALLOC_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, NCSCC_RC_FAILURE, __FILE__, __LINE__); return NCSCC_RC_FAILURE; } evt->evt_type = MQSV_NOT_DSEND_EVENT; evt->type = MQSV_EVT_MQND_CTRL; evt->msg.mqnd_ctrl.type = MQND_CTRL_EVT_MDS_INFO; evt->msg.mqnd_ctrl.info.mds_info.change = svc_evt->i_change; evt->msg.mqnd_ctrl.info.mds_info.dest = svc_evt->i_dest; evt->msg.mqnd_ctrl.info.mds_info.svc_id = svc_evt->i_svc_id; m_LOG_MQSV_ND(MQND_MQA_SERVICE_WENT_DOWN, NCSFL_LC_MQSV_INIT, NCSFL_SEV_NOTICE, m_NCS_NODE_ID_FROM_MDS_DEST(svc_evt->i_dest), __FILE__, __LINE__); /* Post the event to MQND Thread */ rc = m_NCS_IPC_SEND(&cb->mbx, evt, NCS_IPC_PRIORITY_HIGH); if (rc != NCSCC_RC_SUCCESS) { m_LOG_MQSV_ND(MQND_MDS_SND_TO_MAILBOX_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, rc, __FILE__, __LINE__); } /* mqnd_proc_mqa_down(cb, &svc_evt->i_dest); */ } else return NCSCC_RC_SUCCESS; break; case NCSMDS_UP: switch (svc_evt->i_svc_id) { case NCSMDS_SVC_ID_MQD: { cb->is_mqd_up = TRUE; cb->mqd_dest = svc_evt->i_dest; m_LOG_MQSV_ND(MQND_MQD_SERVICE_CAME_UP, NCSFL_LC_MQSV_INIT, NCSFL_SEV_NOTICE, rc, __FILE__, __LINE__); to_dest_slotid = mqsv_get_phy_slot_id(svc_evt->i_dest); o_msg_fmt_ver = m_NCS_ENC_MSG_FMT_GET(svc_evt->i_rem_svc_pvt_ver, MQND_WRT_MQD_SUBPART_VER_AT_MIN_MSG_FMT, MQND_WRT_MQD_SUBPART_VER_AT_MAX_MSG_FMT, mqnd_mqd_msg_fmt_table); if (!o_msg_fmt_ver) /*Log informing the existence of Non compatible MQD version, Slot id being logged */ m_LOG_MQSV_ND(MQND_MSG_FRMT_VER_INVALID, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, to_dest_slotid, __FILE__, __LINE__); } break; case NCSMDS_SVC_ID_MQA: { MQSV_EVT *evt = NULL; to_dest_slotid = mqsv_get_phy_slot_id(svc_evt->i_dest); o_msg_fmt_ver = m_NCS_ENC_MSG_FMT_GET(svc_evt->i_rem_svc_pvt_ver, MQND_WRT_MQA_SUBPART_VER_AT_MIN_MSG_FMT, MQND_WRT_MQA_SUBPART_VER_AT_MAX_MSG_FMT, mqnd_mqa_msg_fmt_table); if (!o_msg_fmt_ver) /*Log informing the existence of Non compatible MQA version, Slot id being logged */ m_LOG_MQSV_ND(MQND_MSG_FRMT_VER_INVALID, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, to_dest_slotid, __FILE__, __LINE__); /* Post the event to Update the MQA list */ evt = m_MMGR_ALLOC_MQSV_EVT(NCS_SERVICE_ID_MQND); if (evt == NULL) { m_LOG_MQSV_ND(MQND_EVT_ALLOC_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, NCSCC_RC_FAILURE, __FILE__, __LINE__); return NCSCC_RC_FAILURE; } evt->evt_type = MQSV_NOT_DSEND_EVENT; evt->type = MQSV_EVT_MQND_CTRL; evt->msg.mqnd_ctrl.type = MQND_CTRL_EVT_MDS_MQA_UP_INFO; evt->msg.mqnd_ctrl.info.mqa_up_info.mqa_up_dest = svc_evt->i_dest; m_LOG_MQSV_ND(MQND_MQA_CAME_UP, NCSFL_LC_MQSV_INIT, NCSFL_SEV_NOTICE, m_NCS_NODE_ID_FROM_MDS_DEST(svc_evt->i_dest), __FILE__, __LINE__); /* Post the event to MQND Thread */ rc = m_NCS_IPC_SEND(&cb->mbx, evt, NCS_IPC_PRIORITY_HIGH); if (rc != NCSCC_RC_SUCCESS) { m_LOG_MQSV_ND(MQND_MDS_SND_TO_MAILBOX_FAILED, NCSFL_LC_MQSV_INIT, 
NCSFL_SEV_ERROR, rc, __FILE__, __LINE__); m_MMGR_FREE_MQSV_EVT(evt, NCS_SERVICE_ID_MQND); return rc; } } break; default: break; } break; case NCSMDS_NO_ACTIVE: cb->is_mqd_up = FALSE; break; case NCSMDS_NEW_ACTIVE: cb->is_mqd_up = TRUE; { MQSV_EVT *evt = NULL; evt = m_MMGR_ALLOC_MQSV_EVT(NCS_SERVICE_ID_MQND); if (evt == NULL) { cb->is_mqd_up = TRUE; m_LOG_MQSV_ND(MQND_EVT_ALLOC_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, NCSCC_RC_FAILURE, __FILE__, __LINE__); return NCSCC_RC_FAILURE; } memset(evt, 0, sizeof(MQSV_EVT)); evt->evt_type = MQSV_NOT_DSEND_EVENT; evt->type = MQSV_EVT_MQND_CTRL; evt->msg.mqnd_ctrl.type = MQND_CTRL_EVT_DEFERRED_MQA_RSP; /* Post the event to MQND Thread */ rc = m_NCS_IPC_SEND(&cb->mbx, evt, NCS_IPC_PRIORITY_HIGH); if (rc != NCSCC_RC_SUCCESS) { m_LOG_MQSV_ND(MQND_MDS_SND_TO_MAILBOX_FAILED, NCSFL_LC_MQSV_INIT, NCSFL_SEV_ERROR, rc, __FILE__, __LINE__); } } break; default: break; } return NCSCC_RC_SUCCESS; }
/**************************************************************************** Name : eda_eds_msg_proc Description : This routine is used to process the ASYNC incoming EDS messages. Arguments : pointer to struct ncsmds_callback_info Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE Notes : None. ******************************************************************************/ static uint32_t eda_eds_msg_proc(EDA_CB *eda_cb, EDSV_MSG *edsv_msg, MDS_SEND_PRIORITY_TYPE prio) { switch (edsv_msg->type) { case EDSV_EDS_CBK_MSG: switch (edsv_msg->info.cbk_info.type) { case EDSV_EDS_CHAN_OPEN: { EDA_CLIENT_HDL_REC *eda_hdl_rec; EDA_CHANNEL_HDL_REC *channel_hdl_rec; EDSV_EDA_CHAN_OPEN_CBK_PARAM *cbk_param = &edsv_msg->info.cbk_info.param.chan_open_cbk; /** Create the chan hdl record here before ** queing this message onto the priority queue ** so that the dispatch by the application to fetch ** the callback is instantaneous. **/ /** Lookup the hdl rec by reg_id **/ if (NULL == (eda_hdl_rec = eda_find_hdl_rec_by_regid(eda_cb, edsv_msg->info.cbk_info. eds_reg_id))) { TRACE_4("client handle record for reg_id: %u not found", edsv_msg->info.cbk_info.eds_reg_id); eda_msg_destroy(edsv_msg); return NCSCC_RC_FAILURE; } /** Create/add a channel record to the hdl rec with ** the information received in this message. ** only if the return status was SA_AIS_OK. **/ if (SA_AIS_OK == cbk_param->error) { if (NULL == (channel_hdl_rec = eda_channel_hdl_rec_add(&eda_hdl_rec, cbk_param->chan_id, cbk_param->chan_open_id, cbk_param-> chan_open_flags, &cbk_param-> chan_name))) { TRACE_4("channel add failed for chan_id: %u, chan_open_id: %u, \ channelname: %s", cbk_param->chan_id, cbk_param->chan_open_id, cbk_param->chan_name.value); eda_msg_destroy(edsv_msg); return NCSCC_RC_FAILURE; } /** pass on the channel_hdl to the application thru cbk **/ cbk_param->eda_chan_hdl = channel_hdl_rec->channel_hdl; } /** enqueue this message anyway **/ if (NCSCC_RC_SUCCESS != m_NCS_IPC_SEND(&eda_hdl_rec->mbx, edsv_msg, prio)) { TRACE_4("IPC send failed failed for msg type: %u", edsv_msg->type); return NCSCC_RC_FAILURE; } } break; case EDSV_EDS_DELIVER_EVENT: { EDA_CLIENT_HDL_REC *eda_hdl_rec; EDA_CHANNEL_HDL_REC *chan_hdl_rec; EDA_EVENT_HDL_REC *evt_hdl_rec; EDSV_EDA_EVT_DELIVER_CBK_PARAM *evt_dlv_param = &edsv_msg->info.cbk_info.param.evt_deliver_cbk; /** Lookup the hdl rec **/ if (NULL == (eda_hdl_rec = eda_find_hdl_rec_by_regid(eda_cb, edsv_msg->info.cbk_info. eds_reg_id))) { TRACE_4("reg record not found reg_id: %u", edsv_msg->info.cbk_info.eds_reg_id); edsv_free_evt_pattern_array(evt_dlv_param->pattern_array); evt_dlv_param->pattern_array = NULL; /** free the event data if any **/ if (evt_dlv_param->data) { m_MMGR_FREE_EDSV_EVENT_DATA(evt_dlv_param->data); evt_dlv_param->data = NULL; } eda_msg_destroy(edsv_msg); return NCSCC_RC_FAILURE; } /** Lookup the channel record to which ** this event belongs **/ if (NULL == (chan_hdl_rec = eda_find_chan_hdl_rec_by_chan_id(eda_hdl_rec, evt_dlv_param->chan_id, evt_dlv_param-> chan_open_id))) { TRACE_4("chan rec not found for chan_id: %u, chan_open_id: %u", evt_dlv_param->chan_id, evt_dlv_param->chan_open_id); edsv_free_evt_pattern_array(evt_dlv_param->pattern_array); evt_dlv_param->pattern_array = NULL; /** free the event data if any **/ if (evt_dlv_param->data) { m_MMGR_FREE_EDSV_EVENT_DATA(evt_dlv_param->data); evt_dlv_param->data = NULL; } eda_msg_destroy(edsv_msg); return NCSCC_RC_FAILURE; } /** Create/Add the new event record. 
**/ if (NULL == (evt_hdl_rec = eda_event_hdl_rec_add(&chan_hdl_rec))) { edsv_free_evt_pattern_array(evt_dlv_param->pattern_array); evt_dlv_param->pattern_array = NULL; /** free the event data if any **/ if (evt_dlv_param->data) { m_MMGR_FREE_EDSV_EVENT_DATA(evt_dlv_param->data); evt_dlv_param->data = NULL; } eda_msg_destroy(edsv_msg); TRACE_4("event record add failed"); return NCSCC_RC_FAILURE; } /** Initialize the fields in the evt_hdl_rec with data ** received in the message. **/ evt_hdl_rec->priority = evt_dlv_param->priority; evt_hdl_rec->publisher_name = evt_dlv_param->publisher_name; evt_hdl_rec->publish_time = evt_dlv_param->publish_time; evt_hdl_rec->retention_time = evt_dlv_param->retention_time; evt_hdl_rec->event_data_size = evt_dlv_param->data_len; /** mark the event as rcvd. **/ evt_hdl_rec->evt_type |= EDA_EVT_RECEIVED; /** Create/Add the new event inst record. **/ /** The evt hdl rec will take ownership of the memory ** for the patterns & data to avoid too many copies ** and not that much use of these in the callback. **/ evt_hdl_rec->del_evt_id = evt_dlv_param->eda_event_id; evt_hdl_rec->pattern_array = evt_dlv_param->pattern_array; evt_dlv_param->pattern_array = NULL; evt_hdl_rec->evt_data = evt_dlv_param->data; evt_dlv_param->data = NULL; /* assign the newly allocated hdl */ evt_dlv_param->event_hdl = evt_hdl_rec->event_hdl; /** enqueue this message. MDS & IPC priority match 1-1 **/ if (NCSCC_RC_SUCCESS != m_NCS_IPC_SEND(&eda_hdl_rec->mbx, edsv_msg, prio)) { TRACE_4("IPC send failed for msg type: %u", edsv_msg->type); return NCSCC_RC_FAILURE; } } break; case EDSV_EDS_CLMNODE_STATUS: { EDSV_EDA_CLM_STATUS_CBK_PARAM *clm_status_param = &edsv_msg->info.cbk_info.param.clm_status_cbk; eda_cb->node_status = (SaClmClusterChangesT)clm_status_param->node_status; TRACE_1("Local node membership changed to : %u", eda_cb->node_status); } break; default: TRACE_3("unknown message type: %u", edsv_msg->info.cbk_info.type); return NCSCC_RC_FAILURE; break; } break; case EDSV_EDS_MISC_MSG: /** No messages conceived yet **/ TRACE_1("Unsupported message type"); return NCSCC_RC_FAILURE; break; default: /** Unexpected message **/ TRACE_4("Wrong message type"); return NCSCC_RC_FAILURE; break; }
/****************************************************************************\ * Function: avd_role_failover_qsd_actv * * Purpose: AVSV function to handle AVD's fail-over. * * Input: cb - AVD control block pointer. * role - Role to be set. * * Returns: NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE. * * NOTES: * * \**************************************************************************/ static uns32 avd_role_failover_qsd_actv(AVD_CL_CB *cb, SaAmfHAStateT role) { uns32 status = NCSCC_RC_SUCCESS; AVD_AVND *avnd = NULL; AVD_AVND *avnd_other = NULL; AVD_EVT *evt = AVD_EVT_NULL; NCSMDS_INFO svc_to_mds_info; TRACE_ENTER(); LOG_NO("FAILOVER Quiesced --> Active"); /* If we are in the middle of admin switch, ignore it */ if (cb->swap_switch == SA_TRUE) { cb->swap_switch = SA_FALSE; } /* * Check whether Standby is in sync with Active. If yes then * proceed further. Else return failure. */ if (AVD_STBY_OUT_OF_SYNC == cb->stby_sync_state) { LOG_ER("FAILOVER Quiesced --> Active FAILED, Stanby OUT OF SYNC"); return NCSCC_RC_FAILURE; } if (NULL == (avnd = avd_node_find_nodeid(cb->node_id_avd))) { LOG_ER("FAILOVER Quiesced --> Active FAILED, DB not found"); return NCSCC_RC_FAILURE; } /* check the node state */ if (avnd->node_state != AVD_AVND_STATE_PRESENT) { LOG_ER("FAILOVER Quiesced --> Active FAILED, stdby not in good state"); return NCSCC_RC_FAILURE; } /* Section to check whether AvD was doing a role change before getting into failover */ svc_to_mds_info.i_mds_hdl = cb->vaddr_pwe_hdl; svc_to_mds_info.i_svc_id = NCSMDS_SVC_ID_AVD; svc_to_mds_info.i_op = MDS_QUERY_DEST; svc_to_mds_info.info.query_dest.i_dest = cb->vaddr; svc_to_mds_info.info.query_dest.i_svc_id = NCSMDS_SVC_ID_AVD; svc_to_mds_info.info.query_dest.i_query_for_role = TRUE; svc_to_mds_info.info.query_dest.info.query_for_role.i_anc = 0; // TODO? if (ncsmds_api(&svc_to_mds_info) == NCSCC_RC_SUCCESS) { if (svc_to_mds_info.info.query_dest.info.query_for_role.o_vdest_rl == V_DEST_RL_ACTIVE) { /* We were in middle of switch, but we had not progresses much with role switch functionality. * its ok to just change the NCS SU's who are already quiesced, back to Active. * Post an evt on mailbox to set active role to all NCS SU * */ AVD_EVT evt; memset(&evt, '\0', sizeof(AVD_EVT)); evt.rcv_evt = AVD_EVT_SWITCH_NCS_SU; /* set cb state to active */ cb->avail_state_avd = role; avd_role_switch_ncs_su_evh(cb, &evt); if (NULL != (avnd_other = avd_node_find_nodeid(cb->node_id_avd_other))) { /* We are successfully changed role to Active. do node down processing for other node */ avd_node_mark_absent(avnd_other); } else { LOG_EM("%s:%u: %u", __FILE__, __LINE__, NCSCC_RC_FAILURE); } return NCSCC_RC_SUCCESS; /* END OF THIS FLOW */ } } /* We are not in middle of role switch functionality, carry on with normal failover flow */ avsv_set_ckpt_role(cb, SA_AMF_HA_ACTIVE); /* Now Dispatch all the messages from the MBCSv mail-box */ if (NCSCC_RC_SUCCESS != (status = avsv_mbcsv_dispatch(cb, SA_DISPATCH_ALL))) { LOG_ER("FAILOVER Quiesced --> Active FAILED, MBCSV DISPATCH FAILED"); return NCSCC_RC_FAILURE; } /* * We might be having some async update messages in the * Queue to be processed, now drop all of them. */ avsv_dequeue_async_update_msgs(cb, FALSE); cb->avail_state_avd = role; /* Declare this standby as Active. 
Set Vdest role and MBCSv role */ if (NCSCC_RC_SUCCESS != (status = avd_mds_set_vdest_role(cb, role))) { LOG_ER("%s: avd_mds_set_vdest_role failed", __FUNCTION__); } /* Time to send fail-over messages to all the AVND's */ avd_fail_over_event(cb); /* We need to send the role to AvND. */ status = avd_avnd_send_role_change(cb, cb->node_id_avd, cb->avail_state_avd); if (NCSCC_RC_SUCCESS != status) { LOG_ER("%s: avd_avnd_send_role_change failed", __FUNCTION__); } else { avd_d2n_msg_dequeue(cb); } /* Post an evt on mailbox to set active role to all NCS SU */ /* create the message event */ evt = calloc(1, sizeof(AVD_EVT)); if (evt == NULL) { LOG_ER("%s: calloc failed", __FUNCTION__); return NCSCC_RC_FAILURE; } evt->rcv_evt = AVD_EVT_SWITCH_NCS_SU; if (m_NCS_IPC_SEND(&cb->avd_mbx, evt, NCS_IPC_PRIORITY_HIGH) != NCSCC_RC_SUCCESS) { LOG_ER("FAILOVER Quiesced --> Active FAILED, IPC SEND FAILED"); free(evt); return NCSCC_RC_FAILURE; } /* We are successfully changed role to Active. Gen a reset * responce for the other card. TODO */ cb->node_id_avd_other = 0; if (avd_imm_config_get() != NCSCC_RC_SUCCESS) return NCSCC_RC_FAILURE; avd_imm_impl_set_task_create(); TRACE_LEAVE(); return NCSCC_RC_SUCCESS; }
/**************************************************************************** Name : ntfa_ntfs_msg_proc Description : This routine is used to process the ASYNC incoming NTFS messages. Arguments : pointer to struct ncsmds_callback_info Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE Notes : None. ******************************************************************************/ uns32 ntfa_ntfs_msg_proc(ntfa_cb_t *cb, ntfsv_msg_t *ntfsv_msg, MDS_SEND_PRIORITY_TYPE prio) { TRACE_ENTER(); switch (ntfsv_msg->type) { case NTFSV_NTFS_CBK_MSG: switch (ntfsv_msg->info.cbk_info.type) { case NTFSV_NOTIFICATION_CALLBACK: { ntfa_client_hdl_rec_t *ntfa_hdl_rec; TRACE_2("NTFSV_NOTIFICATION_CALLBACK: " "subscriptionId = %d," " client_id = %d", (int)ntfsv_msg->info.cbk_info.subscriptionId, (int)ntfsv_msg->info.cbk_info.ntfs_client_id); /** Lookup the hdl rec by client_id **/ if (NULL == (ntfa_hdl_rec = ntfa_find_hdl_rec_by_client_id(cb, ntfsv_msg->info.cbk_info.ntfs_client_id))) { TRACE("client_id not found"); ntfa_msg_destroy(ntfsv_msg); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } /** enqueue this message **/ if (NCSCC_RC_SUCCESS != m_NCS_IPC_SEND(&ntfa_hdl_rec->mbx, ntfsv_msg, prio)) { TRACE("IPC SEND FAILED"); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } } break; case NTFSV_DISCARDED_CALLBACK: { ntfa_client_hdl_rec_t *ntfa_hdl_rec; TRACE_2("NTFSV_DISCARDED_CALLBACK: " "subscriptionId = %d," " client_id = %d", (int)ntfsv_msg->info.cbk_info.subscriptionId, (int)ntfsv_msg->info.cbk_info.ntfs_client_id); /** Lookup the hdl rec by client_id **/ if (NULL == (ntfa_hdl_rec = ntfa_find_hdl_rec_by_client_id(cb, ntfsv_msg->info.cbk_info.ntfs_client_id))) { TRACE("client_id not found"); ntfa_msg_destroy(ntfsv_msg); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } /** enqueue this message **/ if (NCSCC_RC_SUCCESS != m_NCS_IPC_SEND(&ntfa_hdl_rec->mbx, ntfsv_msg, prio)) { TRACE("IPC SEND FAILED"); TRACE_LEAVE(); return NCSCC_RC_FAILURE; } } break; default: TRACE("unknown type %d", ntfsv_msg->info.cbk_info.type); TRACE_LEAVE(); return NCSCC_RC_FAILURE; break; } break; default: /** Unexpected message **/ TRACE_2("Unexpected message type: %d", ntfsv_msg->type); TRACE_LEAVE(); return NCSCC_RC_FAILURE; break; } TRACE_LEAVE(); return NCSCC_RC_SUCCESS; }
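/****************************************************************************
  Illustrative sketch (not part of the original code): how a dispatch-all
  style loop could drain the per-client mailbox filled by ntfa_ntfs_msg_proc()
  above. It assumes the standard m_NCS_IPC_NON_BLK_RECEIVE macro; the handler
  name ntfa_process_cbk_msg_sketch() is hypothetical and is assumed not to
  free the message itself, so the loop destroys it after the call.
******************************************************************************/
/* hypothetical callback handler invoked for each queued message */
static void ntfa_process_cbk_msg_sketch(ntfa_cb_t *cb, ntfa_client_hdl_rec_t *hdl_rec, ntfsv_msg_t *cbk_msg);

static uns32 ntfa_hdl_cbk_dispatch_all_sketch(ntfa_cb_t *cb, ntfa_client_hdl_rec_t *hdl_rec)
{
	ntfsv_msg_t *cbk_msg = NULL;

	/* returns NULL once the mailbox is empty */
	while (NULL != (cbk_msg = (ntfsv_msg_t *)m_NCS_IPC_NON_BLK_RECEIVE(&hdl_rec->mbx, cbk_msg))) {
		if (cbk_msg->type == NTFSV_NTFS_CBK_MSG)
			ntfa_process_cbk_msg_sketch(cb, hdl_rec, cbk_msg);

		ntfa_msg_destroy(cbk_msg);
	}

	return NCSCC_RC_SUCCESS;
}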