/** * Multicast a message to the cluster. Errors are treated as * fatal and will exit the program. * @param msg_id * @param buf * @param len * * @return int */ int amf_msg_mcast (int msg_id, void *buf, size_t len) { struct req_exec_amf_msg msg; struct iovec iov[2]; int iov_cnt; int res; // ENTER ("%u, %p, %u", msg_id, buf, len); msg.header.size = sizeof (msg); msg.header.id = SERVICE_ID_MAKE (AMF_SERVICE, msg_id); iov[0].iov_base = (char *)&msg; iov[0].iov_len = sizeof (msg); if (buf == NULL) { msg.header.size = sizeof (msg); iov_cnt = 1; } else { msg.header.size = sizeof (msg) + len; iov[1].iov_base = buf; iov[1].iov_len = len; iov_cnt = 2; } res = totempg_groups_mcast_joined ( openais_group_handle, iov, iov_cnt, TOTEMPG_AGREED); if (res != 0) { dprintf("Unable to send %d bytes\n", msg.header.size); openais_exit_error (AIS_DONE_FATAL_ERR); } return res; }
static int ykd_attempt_send_msg (enum totem_callback_token_type type, void *context) { struct iovec iovec; struct ykd_header header; int res; header.id = YKD_HEADER_SENDSTATE; iovec.iov_base = (char *)&header; iovec.iov_len = sizeof (struct ykd_header); res = totempg_groups_mcast_joined (ykd_group_handle, &iovec, 1, TOTEMPG_AGREED); return (res); }
/*
 * Multicast an IPC request to the corosync group, accounting the send
 * in the owning service's tx statistics when that service is loaded.
 * The service id and function id are unpacked from the request header.
 */
int main_mcast (
	const struct iovec *iovec,
	unsigned int iov_len,
	unsigned int guarantee)
{
	const struct qb_ipc_request_header *req = iovec->iov_base;
	int32_t svc = req->id >> 16;
	int32_t fn = req->id & 0xffff;

	if (corosync_service[svc]) {
		icmap_fast_inc(service_stats_tx[svc][fn]);
	}

	return totempg_groups_mcast_joined (corosync_group_handle, iovec,
		iov_len, guarantee);
}
int main_mcast ( const struct iovec *iovec, unsigned int iov_len, unsigned int guarantee) { const coroipc_request_header_t *req = iovec->iov_base; int service; int fn_id; unsigned int key_incr_dummy; service = req->id >> 16; fn_id = req->id & 0xffff; if (ais_service[service]) { objdb->object_key_increment (service_stats_handle[service][fn_id], "tx", strlen("tx"), &key_incr_dummy); } return (totempg_groups_mcast_joined (corosync_group_handle, iovec, iov_len, guarantee)); }
static void message_handler_req_lib_cfg_ringreenable ( void *conn, void *msg) { struct req_exec_cfg_ringreenable req_exec_cfg_ringreenable; struct iovec iovec; ENTER(""); req_exec_cfg_ringreenable.header.size = sizeof (struct req_exec_cfg_ringreenable); req_exec_cfg_ringreenable.header.id = SERVICE_ID_MAKE (CFG_SERVICE, MESSAGE_REQ_EXEC_CFG_RINGREENABLE); message_source_set (&req_exec_cfg_ringreenable.source, conn); iovec.iov_base = &req_exec_cfg_ringreenable; iovec.iov_len = sizeof (struct req_exec_cfg_ringreenable); assert (totempg_groups_mcast_joined (openais_group_handle, &iovec, 1, TOTEMPG_SAFE) == 0); LEAVE(""); }
/**
 * Multicast a GMS protocol message of the given type to the cluster.
 *
 * Fills a req_exec_gms_nodejoin payload from memberNodeInfo according to
 * msgType, marshalls it into a flat buffer, prepends a mar_req_header_t
 * (carrying the total length so the receiver can unmarshall), and
 * multicasts the result over totem with TOTEMPG_AGREED ordering.
 *
 * @param memberNodeInfo  member/group data copied into the message
 * @param groupId         GMS group id the message applies to
 * @param msgType         one of the CL_GMS_* message types handled below
 * @param ejectReason     used only for cluster join/leave/eject messages
 * @param dataSize        user payload size (CL_GMS_GROUP_MCAST_MSG only)
 * @param dataPtr         user payload pointer (sync / group mcast messages)
 *
 * @return 0 when the send is skipped or CL_ERR_INVALID_PARAMETER for an
 *         unknown msgType; the totempg result on a completed send.
 *         NOTE(review): the goto error paths return `result` (still -1)
 *         rather than the failing `rc` — confirm callers expect that.
 */
int clGmsSendMsg(ClGmsViewMemberT *memberNodeInfo,
                 ClGmsGroupIdT     groupId,
                 ClGmsMessageTypeT msgType,
                 ClGmsMemberEjectReasonT ejectReason,
                 ClUint32T dataSize,
                 ClPtrT    dataPtr)
{
    mar_req_header_t header = {0};
    struct VDECL(req_exec_gms_nodejoin) req_exec_gms_nodejoin = {{0}};
    struct iovec req_exec_gms_iovec = {0};
    int result = -1;
    ClRcT rc = CL_OK;
    ClUint32T clusterVersion = 0;
    ClBufferHandleT bufferHandle = 0;
    ClUint8T *message = NULL;
    ClUint32T length = 0;
    ClPtrT temp = NULL;

    rc = clNodeCacheMinVersionGet(NULL, &clusterVersion);
    /* Newer clusters (>= 5.0.0) with native leader election build the
     * view from the node cache, so this totem message is unnecessary. */
    if(clusterVersion >= CL_VERSION_CODE(5, 0, 0) && clAspNativeLeaderElection())
    {
        clLog(DBG, OPN, AIS,
              "Skipped sending msgtype [%d] since node cache is used to form the cluster view",
              msgType);
        return 0;
    }
    /* curVer is file-scope state (declared outside this function).
     * Fall back to compile-time version macros if the cache lookup failed. */
    if (rc != CL_OK)
    {
        clLog(ERROR,OPN,AIS,
              "Error while getting version from the version cache. rc 0x%x",rc);
        curVer.releaseCode = CL_RELEASE_VERSION;
        curVer.majorVersion = CL_MAJOR_VERSION;
        curVer.minorVersion = CL_MINOR_VERSION;
    } else {
        curVer.releaseCode = CL_VERSION_RELEASE(clusterVersion);
        curVer.majorVersion = CL_VERSION_MAJOR(clusterVersion);
        curVer.minorVersion = CL_VERSION_MINOR(clusterVersion);
    }

    /* Get the version and send it */
    req_exec_gms_nodejoin.version.releaseCode = curVer.releaseCode;
    req_exec_gms_nodejoin.version.majorVersion = curVer.majorVersion;
    req_exec_gms_nodejoin.version.minorVersion = curVer.minorVersion;

    /* For now we send message without caring about version. Later on
     * we need to change it accordingly */
    switch(msgType)
    {
        /* Cluster-level messages: carry the cluster member record. */
        case CL_GMS_CLUSTER_JOIN_MSG:
        case CL_GMS_CLUSTER_LEAVE_MSG:
        case CL_GMS_CLUSTER_EJECT_MSG:
            clLog(DBG,OPN,AIS,
                  "Sending cluster %s multicast message",
                  msgType == CL_GMS_CLUSTER_JOIN_MSG ? "join":
                  msgType == CL_GMS_CLUSTER_LEAVE_MSG ? "leave" : "eject");
            req_exec_gms_nodejoin.ejectReason = ejectReason;
            memcpy (&req_exec_gms_nodejoin.specificMessage.gmsClusterNode,
                    &memberNodeInfo->clusterMember,
                    sizeof (ClGmsClusterMemberT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;

        /* Group-level messages: carry group member record + group data. */
        case CL_GMS_GROUP_CREATE_MSG:
        case CL_GMS_GROUP_DESTROY_MSG:
        case CL_GMS_GROUP_JOIN_MSG:
        case CL_GMS_GROUP_LEAVE_MSG:
            clLog(DBG,OPN,AIS,
                  "Sending group %s multicast message",
                  msgType == CL_GMS_GROUP_CREATE_MSG ? "create" :
                  msgType == CL_GMS_GROUP_DESTROY_MSG ? "destroy" :
                  msgType == CL_GMS_GROUP_JOIN_MSG ? "join" : "leave");
            memcpy (&req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode,
                    &memberNodeInfo->groupMember,
                    sizeof (ClGmsGroupMemberT));
            memcpy (&req_exec_gms_nodejoin.specificMessage.groupMessage.groupData,
                    &memberNodeInfo->groupData,
                    sizeof(ClGmsGroupInfoT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;

        case CL_GMS_COMP_DEATH:
            clLog(DBG,OPN,AIS,
                  "Sending comp death multicast message");
            memcpy (&req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode,
                    &memberNodeInfo->groupMember,
                    sizeof (ClGmsGroupMemberT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;

        case CL_GMS_LEADER_ELECT_MSG:
            clLog(DBG,OPN,AIS,
                  "Sending leader elect multicast message");
            memcpy (&req_exec_gms_nodejoin.specificMessage.gmsClusterNode,
                    &memberNodeInfo->clusterMember,
                    sizeof (ClGmsClusterMemberT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;

        /* Sync message: only the opaque data pointer travels in-band.
         * NOTE(review): dataPtr is sent as-is — presumably it is
         * marshalled/dereferenced downstream; verify with the receiver. */
        case CL_GMS_SYNC_MESSAGE:
            clLog(DBG,OPN,AIS,
                  "Sending gms synch multicast message");
            req_exec_gms_nodejoin.dataPtr = dataPtr;
            break;

        /* Group mcast: group info plus user payload size/pointer. */
        case CL_GMS_GROUP_MCAST_MSG:
            memcpy (&req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.gmsGroupNode,
                    &memberNodeInfo->groupMember,
                    sizeof (ClGmsGroupMemberT));
            memcpy (&req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.groupData,
                    &memberNodeInfo->groupData,
                    sizeof(ClGmsGroupInfoT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            req_exec_gms_nodejoin.specificMessage.mcastMessage.userDataSize = dataSize;
            req_exec_gms_nodejoin.dataPtr = dataPtr;
            break;

        default:
            clLog(DBG,OPN,AIS,
                  "Requested wrong message to be multicasted. Message type %d",
                  msgType);
            return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);
    }

    req_exec_gms_nodejoin.gmsMessageType = msgType;
    req_exec_gms_nodejoin.gmsGroupId = groupId;

    /* Create a buffer handle and marshall the elements */
    rc = clBufferCreate(&bufferHandle);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                   "Failed to create buffer while sending message on totem. rc 0x%x",rc);
        return rc;
    }

    rc = marshallReqExecGmsNodeJoin(&req_exec_gms_nodejoin,bufferHandle);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                   "Failed to marshall the data while sending message on totem. rc 0x%x",rc);
        goto buffer_delete_return;
    }

    rc = clBufferLengthGet(bufferHandle, &length);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                   "Failed to get buffer length. rc 0x%x",rc);
        goto buffer_delete_return;
    }

    /* clBufferFlatten allocates `message`; freed at buffer_delete_return. */
    rc = clBufferFlatten(bufferHandle, &message);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                   "clBufferFlatten failed with rc 0x%x",rc);
        goto buffer_delete_return;
    }

    header.id = SERVICE_ID_MAKE (GMS_SERVICE, MESSAGE_REQ_EXEC_GMS_NODEJOIN);
    header.size = length + sizeof(mar_req_header_t);

    /* We need to prepend the total message length in the beginning of the
     * message so that we can find the length while unmarshalling */
    temp = clHeapAllocate(header.size);
    if (temp == NULL)
    {
        clLogError(OPN,AIS,
                   "Failed to allocate memory while sending the message");
        goto buffer_delete_return;
    }
    memcpy(temp,&header, sizeof(mar_req_header_t));
    memcpy(temp+sizeof(mar_req_header_t), message, length);

    req_exec_gms_iovec.iov_base = temp;
    req_exec_gms_iovec.iov_len = length + sizeof(mar_req_header_t);

    result = totempg_groups_mcast_joined (openais_group_handle,
                                          &req_exec_gms_iovec, 1, TOTEMPG_AGREED);
    clLog(DBG,OPN,AIS,
          "Done with sending multicast message of type %d",msgType);

buffer_delete_return:
    if (message != NULL)
        clHeapFree(message);
    if (temp != NULL)
        clHeapFree(temp);
    clBufferDelete(&bufferHandle);
    return result;
}
static int gms_nodejoin_send (void) { /* For now this function sends only latest version. It needs to be * modified in future when version changes */ /* Send the join message with given version */ mar_req_header_t header = {0}; struct VDECL(req_exec_gms_nodejoin) req_exec_gms_nodejoin; struct iovec req_exec_gms_iovec; ClGmsClusterMemberT thisGmsClusterNode; int result; ClRcT rc = CL_OK; ClUint32T clusterVersion; ClBufferHandleT bufferHandle = 0; ClUint8T *message = NULL; ClPtrT temp = NULL; ClUint32T length = 0; rc = clNodeCacheMinVersionGet(NULL, &clusterVersion); if (rc != CL_OK) { clLog(ERROR,OPN,AIS, "Error while getting version from the version cache. rc 0x%x",rc); curVer.releaseCode = CL_RELEASE_VERSION; curVer.majorVersion = CL_MAJOR_VERSION; curVer.minorVersion = CL_MINOR_VERSION; } else { curVer.releaseCode = CL_VERSION_RELEASE(clusterVersion); curVer.majorVersion = CL_VERSION_MAJOR(clusterVersion); curVer.minorVersion = CL_VERSION_MINOR(clusterVersion); } clLog(DBG,OPN,AIS, "This node is sending join message for version %d, %d, %d", curVer.releaseCode, curVer.majorVersion, curVer.minorVersion); /* Get the version and send it */ req_exec_gms_nodejoin.version.releaseCode = curVer.releaseCode; req_exec_gms_nodejoin.version.majorVersion = curVer.majorVersion; req_exec_gms_nodejoin.version.minorVersion = curVer.minorVersion; _clGmsGetThisNodeInfo(&thisGmsClusterNode); // node join is send for default cluster group - 0 req_exec_gms_nodejoin.gmsGroupId = 0; memcpy (&req_exec_gms_nodejoin.specificMessage.gmsClusterNode, &thisGmsClusterNode, sizeof (ClGmsClusterMemberT)); req_exec_gms_nodejoin.gmsMessageType = CL_GMS_CLUSTER_JOIN_MSG; /* Create a buffer handle and marshall the elements */ rc = clBufferCreate(&bufferHandle); if (rc != CL_OK) { clLogError(OPN,AIS, "Failed to create buffer while sending message on totem. 
rc 0x%x",rc); return rc; } rc = marshallReqExecGmsNodeJoin(&req_exec_gms_nodejoin,bufferHandle); if (rc != CL_OK) { clLogError(OPN,AIS, "Failed to marshall the data while sending message on totem. rc 0x%x",rc); goto buffer_delete_return; } rc = clBufferLengthGet(bufferHandle, &length); if (rc != CL_OK) { clLogError(OPN,AIS, "Failed to get buffer length. rc 0x%x",rc); goto buffer_delete_return; } rc = clBufferFlatten(bufferHandle, &message); if (rc != CL_OK) { clLogError(OPN,AIS, "clBufferFlatten failed with rc 0x%x",rc); goto buffer_delete_return; } /* We need to prepend the total message length in the beginning of the * message so that we can find the length while unmarshalling */ temp = clHeapAllocate(length + sizeof(mar_req_header_t)); if (temp == NULL) { clLogError(OPN,AIS, "Failed to allocate memory while sending the message"); goto buffer_delete_return; } header.id = SERVICE_ID_MAKE (GMS_SERVICE, MESSAGE_REQ_EXEC_GMS_NODEJOIN); header.size = length + sizeof(mar_req_header_t); memcpy(temp,&header,sizeof(mar_req_header_t)); memcpy(temp+sizeof(mar_req_header_t), message, length); req_exec_gms_iovec.iov_base = temp; req_exec_gms_iovec.iov_len = length + sizeof(mar_req_header_t); clLog(DBG,OPN,AIS, "Sending node join from this node in sync_process"); result = totempg_groups_mcast_joined (openais_group_handle, &req_exec_gms_iovec, 1, TOTEMPG_AGREED); buffer_delete_return: if (message != NULL) clHeapFree(message); if (temp != NULL) clHeapFree(temp); clBufferDelete(&bufferHandle); return result; }