/*
 * Basic handler for IP packets.
 *
 * Resolves the per-address stat nodes for source and destination, links
 * each to the saved lower (ethernet) node, dispatches to the UDP
 * sub-handler when applicable, then rewires the shell so the ethernet
 * layer sees the IP nodes as its "higher" data.
 *
 * Returns an accumulated score: SCORE_IP per previously unseen address
 * plus whatever the sub-handler and ip_node_set_info() contribute.
 */
unsigned net_hndl_ip(const uint8_t *pkt, shell *sh)
{
    const struct iphdr *hdr = (const struct iphdr *) pkt;
    void *lower_from = sh->from.lower_node;
    void *lower_to = sh->to.lower_node;
    struct stat_ip *node;
    unsigned score = 0;

    /* Source address: fetch/create the stat node, chain the ether node. */
    sh->from.lower_node = get_node_ip(hdr->saddr);
    node = (struct stat_ip *) sh->from.lower_node;
    node->ether = lower_from;
    if (!node->count) {
        score += SCORE_IP;   /* first time this address is seen */
    }

    /* Destination address: same treatment. */
    sh->to.lower_node = get_node_ip(hdr->daddr);
    node = (struct stat_ip *) sh->to.lower_node;
    node->ether = lower_to;
    if (!node->count) {
        score += SCORE_IP;
    }

    switch (hdr->protocol) {
    case IPPROTO_TCP:
        /* NOTE(review): unlike UDP, no TCP sub-handler is invoked here;
         * only the type tag is recorded. */
        sh->from.higher_type = IP_TYPE_TCP;
        sh->from.higher_data = NULL;
        sh->to.higher_type = IP_TYPE_NONE;
        sh->to.higher_data = NULL;
        break;
    case IPPROTO_UDP:
        /* ihl counts 32-bit words; skip past the IP header. */
        score += net_hndl_udp(pkt + (hdr->ihl * 4), sh);
        break;
    case IPPROTO_ICMP:
        sh->from.higher_type = IP_TYPE_ICMP;
        sh->from.higher_data = NULL;
        sh->to.higher_type = IP_TYPE_NONE;
        sh->to.higher_data = NULL;
        break;
    default:
        sh->from.higher_type = IP_TYPE_UNKNOWN;
        /* Stash the raw protocol number in the pointer slot.  Go through
         * uintptr_t: casting a 32-bit integer directly to a pointer is
         * not portable on 64-bit targets (the original cast to uint32_t
         * first, which truncates the conversion path). */
        sh->from.higher_data = (void *) (uintptr_t) hdr->protocol;
        sh->to.higher_type = IP_TYPE_NONE;
        sh->to.higher_data = NULL;
        break;
    }

    score += ip_node_set_info(&sh->from, sh->time);
    score += ip_node_set_info(&sh->to, sh->time);

    /* Shift this layer down: the IP nodes become the ethernet layer's
     * "higher" data, and the saved ether nodes are restored as "lower". */
    sh->from.higher_type = ETH_TYPE_IP;
    sh->from.higher_data = sh->from.lower_node;
    sh->from.lower_node = lower_from;
    sh->to.higher_type = ETH_TYPE_IP;
    sh->to.higher_data = sh->to.lower_node;
    sh->to.lower_node = lower_to;

    return score;
}
static void gms_confchg_fn ( enum totem_configuration_type configuration_type, unsigned int *member_list, int member_list_entries, unsigned int *left_list, int left_list_entries, unsigned int *joined_list, int joined_list_entries, struct memb_ring_id *ring_id) { int i = 0; char iface_string[256 * INTERFACE_MAX] = ""; ClRcT rc = CL_OK; ClUint32T minVersion = CL_VERSION_CODE(5, 0, 0); clLog (NOTICE,OPN,AIS, "GMS CONFIGURATION CHANGE"); clLog (NOTICE,OPN,AIS, "GMS Configuration:"); for (i = 0; i < member_list_entries; i++) { clLog (NOTICE,OPN,AIS, "\t%s", totempg_ifaces_print (member_list[i])); } clLog(NOTICE,OPN,AIS, "Members Left:"); for (i = 0; i < left_list_entries; i++) { clLog (NOTICE,OPN,AIS, "\t%s", totempg_ifaces_print (left_list[i])); } clLog(NOTICE,OPN,AIS, "Members Joined:"); for (i = 0; i < joined_list_entries; i++) { clLog (NOTICE,OPN,AIS, "\t%s", totempg_ifaces_print (joined_list[i])); } clNodeCacheMinVersionGet(NULL, &minVersion); if(minVersion >= CL_VERSION_CODE(5, 0, 0) && gAspNativeLeaderElection) { clLog(DBG, OPN, AIS, "Skipping node leave processing since node cache is used to sync cluster views"); return ; } for (i = 0; i < left_list_entries; i++) { /* Call Cluster Leave for the nodes which are left. * To do that we need to translate IP address to gms nodeID * NOTE: Currently we are getting the this left_list[i] to * the ip address mapping from the get_interface_ip function. * This is not quite reliable and may get into issues when using * multiple interfaces... */ strncpy(iface_string, get_node_ip(left_list[i]), (256*INTERFACE_MAX)-1); clLog(DBG,OPN,AIS, "Invoking cluster leave for node with IP %s",iface_string); rc = _clGmsEngineClusterLeaveWrapper(CL_GMS_CLUSTER_ID, iface_string); } return; }
int monitor_node_ip(void * owner, void * data) { ADD_TIMER_EVENT(&g_node_event, monitor_node_ip, reinterpret_cast< void *>(1), get_now_tv()->tv_sec + MONITOR_NODE_IP_INTERVAL); if (data) { if (0 == get_node_ip()) { // ip变了,直接重启node restart_node(); } } return 0; }
int plugin_init(int type) { switch (type) { case PROC_MAIN: break; case PROC_CONN: { // release版本要设置网络进程的user为nobody #ifdef SET_PROC_USER if (0 != set_user("nobody")) { ERROR_LOG("set net proc to nobody failed"); return -1; } #endif } break; case PROC_WORK: { setup_timer(); g_start_timestamp = get_now_tv()->tv_sec; get_node_ip(); switch (get_work_idx()) { case PROC_PROXY: // proxy 进程 init_proxy_process(); break; case PROC_COLLECT: // 采集进程 set_title("itl_node-COLLECT"); init_connect_to_db(); init_connect_to_head(); init_collect(); init_check_update(); break; case PROC_COMMAND: set_title("itl_node-COMMAND"); // 监视node的ip,有变动则重启 monitor_node_ip(0, 0); init_log_archive(config_get_strval("log_dir", "./log/"), 2 * 3600); break; case PROC_MYSQL: set_title("itl_node-MYSQL"); init_connect_to_db(); init_mysql_instance(); break; default: break; } } } return 0; }
static void gms_exec_message_handler ( void *message, unsigned int nodeid) { mar_req_header_t header = {0}; struct VDECL(req_exec_gms_nodejoin) req_exec_gms_nodejoin = {{0}}; ClGmsViewNodeT *node = NULL; ClRcT rc = CL_OK; ClGmsClusterMemberT thisGmsClusterNode = {0}; char nodeIp[256 * INTERFACE_MAX] = ""; int isLocalMsg = 0; int verCode = 0; ClBufferHandleT bufferHandle = NULL; /* Get the ip address string for the given nodeId */ strncpy(nodeIp, get_node_ip(nodeid), (256 * INTERFACE_MAX)-1); if (strcmp(nodeIp, totemip_print(this_ip)) == 0) { isLocalMsg = 1; } /* Unmarshall the incoming message */ rc = clBufferCreate(&bufferHandle); if (rc != CL_OK) { clLogError(OPN,AIS, "Failed to create buffer while unmarshalling the received message. rc 0x%x",rc); return; } memcpy(&header, message, sizeof(mar_req_header_t)); rc = clBufferNBytesWrite(bufferHandle, (ClUint8T *)message+sizeof(mar_req_header_t), header.size-sizeof(mar_req_header_t)); if (rc != CL_OK) { clLogError(OPN,AIS, "Failed to retrieve data from buffer. rc 0x%x",rc); goto out_delete; } rc = unmarshallReqExecGmsNodeJoin(bufferHandle, &req_exec_gms_nodejoin); if (rc != CL_OK) { clLogError(OPN,AIS,"Failed to unmarshall the data. rc 0x%x",rc); goto out_delete; } verCode = CL_VERSION_CODE(req_exec_gms_nodejoin.version.releaseCode, req_exec_gms_nodejoin.version.majorVersion, req_exec_gms_nodejoin.version.minorVersion); clLog(DBG,OPN,AIS, "Received a %d message from version [%d.%d.%d].",req_exec_gms_nodejoin.gmsMessageType, req_exec_gms_nodejoin.version.releaseCode, req_exec_gms_nodejoin.version.majorVersion, req_exec_gms_nodejoin.version.minorVersion); /* Verify version */ if (verCode > CL_VERSION_CODE(curVer.releaseCode, curVer.majorVersion, curVer.minorVersion)) { /* I received a message from higher version and it dont know * how to decode it. So it discarding it. */ clLog(NOTICE,OPN,AIS, "Version mismatch detected. 
Discarding the message "); goto out_delete; } // message type & message data clLog(DBG,OPN,AIS,"message type %d from groupId %d!\n", req_exec_gms_nodejoin.gmsMessageType, req_exec_gms_nodejoin.gmsGroupId); /* This message is from same version. So processing it */ switch (req_exec_gms_nodejoin.gmsMessageType) { case CL_GMS_CLUSTER_JOIN_MSG: { ClUint32T minVersion = CL_VERSION_CODE(5, 0, 0); clLog(DBG,OPN,AIS, "Received multicast message for cluster join from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId); clNodeCacheMinVersionGet(NULL, &minVersion); if(minVersion >= CL_VERSION_CODE(5, 0, 0) && gAspNativeLeaderElection) { clLog(DBG, OPN, AIS, "Skipping multicast join since node cache view is used to form the cluster ring"); goto out_delete; } node = (ClGmsViewNodeT *) clHeapAllocate(sizeof(ClGmsViewNodeT)); if (node == NULL) { clLog (ERROR,OPN,AIS, "clHeapAllocate failed"); goto out_delete; } else { rc = clVersionVerify( &(gmsGlobalInfo.config.versionsSupported), &(req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion) ); ringVersion.releaseCode = req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion.releaseCode; ringVersion.majorVersion= req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion.majorVersion; ringVersion.minorVersion= req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion.minorVersion; if(rc != CL_OK) { ringVersionCheckPassed = CL_FALSE; /* copy the ring version */ clGmsCsLeave( &joinCs ); clLog (ERROR,OPN,AIS, "Server Version Mismatch detected for this join message"); break; } _clGmsGetThisNodeInfo(&thisGmsClusterNode); if( thisGmsClusterNode.nodeId != req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId) { /* TODO This will never happen... 
*/ clGmsCsLeave( &joinCs ); } node->viewMember.clusterMember = req_exec_gms_nodejoin.specificMessage.gmsClusterNode; /* If this is local join, then update the IP address */ if (thisGmsClusterNode.nodeId == req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId) { memcpy(&node->viewMember.clusterMember.nodeIpAddress, &myAddress, sizeof(ClGmsNodeAddressT)); } rc = _clGmsEngineClusterJoin(req_exec_gms_nodejoin.gmsGroupId, req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId, node); } } break; case CL_GMS_CLUSTER_EJECT_MSG: clLog (DBG,OPN,AIS, "Received cluster eject multicast message from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId); /* inform the member about the eject by invoking the ejection * callback registered with the reason UKNOWN */ /* The below logic is same for the leave as well so we just * fall through the case */ _clGmsGetThisNodeInfo(&thisGmsClusterNode); if( req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId == thisGmsClusterNode.nodeId) { rc = _clGmsCallClusterMemberEjectCallBack( req_exec_gms_nodejoin.ejectReason); if( rc != CL_OK ) { clLog(ERROR,OPN,AIS,"_clGmsCallEjectCallBack failed with" "rc:0x%x",rc); } } case CL_GMS_CLUSTER_LEAVE_MSG: clLog(DBG,OPN,AIS, "Received cluster leave multicast message from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId); rc = _clGmsEngineClusterLeave(req_exec_gms_nodejoin.gmsGroupId, req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId); break; case CL_GMS_GROUP_CREATE_MSG: clLog(DBG,OPN,AIS, "Received group create multicast message from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress, 
req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId); rc = _clGmsEngineGroupCreate(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupName, req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupParams, req_exec_gms_nodejoin.contextHandle, isLocalMsg); break; case CL_GMS_GROUP_DESTROY_MSG: clLog(DBG,OPN,AIS, "Received group destroy multicast message from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId); rc = _clGmsEngineGroupDestroy(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupId, req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupName, req_exec_gms_nodejoin.contextHandle, isLocalMsg); break; case CL_GMS_GROUP_JOIN_MSG: clLog(DBG,OPN,AIS, "Received group join multicast message from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId); node = (ClGmsViewNodeT *) clHeapAllocate(sizeof(ClGmsViewNodeT)); if (!node) { log_printf (LOG_LEVEL_NOTICE, "clHeapAllocate failed"); goto out_delete; } else { /* FIXME: Need to verify version */ memcpy(&node->viewMember.groupMember,&req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode, sizeof(ClGmsGroupMemberT)); memcpy(&node->viewMember.groupData, &req_exec_gms_nodejoin.specificMessage.groupMessage.groupData, sizeof(ClGmsGroupInfoT)); rc = _clGmsEngineGroupJoin(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupId, node, req_exec_gms_nodejoin.contextHandle, isLocalMsg); } break; case CL_GMS_GROUP_LEAVE_MSG: clLog(DBG,OPN,AIS, "Received group leave multicast message from ioc node [%#x:%#x]", 
req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId); rc = _clGmsEngineGroupLeave(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupId, req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberId, req_exec_gms_nodejoin.contextHandle, isLocalMsg); break; case CL_GMS_COMP_DEATH: clLog(DBG,OPN,AIS, "Received comp death multicast message"); rc = _clGmsRemoveMemberOnCompDeath(req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberId); break; case CL_GMS_LEADER_ELECT_MSG: clLog(DBG,OPN,AIS, "Received leader elect multicast message from ioc node [%#x:%#x]", req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress, req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId); rc = _clGmsEnginePreferredLeaderElect(req_exec_gms_nodejoin.specificMessage.gmsClusterNode, req_exec_gms_nodejoin.contextHandle, isLocalMsg); break; case CL_GMS_SYNC_MESSAGE: clLog(DBG,OPN,AIS, "Received gms synch multicast message"); rc = _clGmsEngineGroupInfoSync((ClGmsGroupSyncNotificationT *)(req_exec_gms_nodejoin.dataPtr)); clHeapFree(((ClGmsGroupSyncNotificationT *)req_exec_gms_nodejoin.dataPtr)->groupInfoList); clHeapFree(((ClGmsGroupSyncNotificationT *)req_exec_gms_nodejoin.dataPtr)->groupMemberList); clHeapFree(req_exec_gms_nodejoin.dataPtr); break; case CL_GMS_GROUP_MCAST_MSG: _clGmsEngineMcastMessageHandler( &(req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.gmsGroupNode), &(req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.groupData), req_exec_gms_nodejoin.specificMessage.mcastMessage.userDataSize, req_exec_gms_nodejoin.dataPtr); break; default: clLogMultiline(ERROR,OPN,AIS, "Openais GMS wrapper received Message wih invalid [MsgType=%x]. 
\n" "This could be because of multicast port clashes.", req_exec_gms_nodejoin.gmsMessageType); goto out_delete; } clLog(TRACE,OPN,AIS, "Processed the received message. Returning"); out_delete: clBufferDelete(&bufferHandle); }