static void __osmv_dispatch_rmpp_mad(IN osm_bind_handle_t h_bind, IN const ib_mad_t * p_mad, IN osmv_txn_ctx_t * p_txn, IN const osm_mad_addr_t * p_mad_addr) { ib_api_status_t status = IB_SUCCESS; uint64_t key = cl_ntoh64(p_mad->trans_id); boolean_t is_init_by_peer = FALSE; osmv_bind_obj_t *p_bo = (osmv_bind_obj_t *) h_bind; osm_madw_t *p_madw; OSM_LOG_ENTER(p_bo->p_vendor->p_log); if (NULL == p_txn) { if (FALSE == osmv_rmpp_is_data(p_mad) || FALSE == osmv_rmpp_is_first(p_mad)) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "The MAD does not match any transaction " "and does not start a sender-initiated RMPP transfer.\n"); goto dispatch_rmpp_mad_done; } /* IB Spec 13.6.2.2. This is a Sender Initiated Transfer. My peer is the requester and RMPP Sender. I am the RMPP Receiver. */ status = osmv_txn_init(h_bind, /*tid==key */ key, key, &p_txn); if (IB_SUCCESS != status) { goto dispatch_rmpp_mad_done; } is_init_by_peer = TRUE; osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "A new sender-initiated transfer (TID=0x%" PRIx64 ") started\n", key); } if (OSMV_TXN_RMPP_NONE == osmv_txn_get_rmpp_state(p_txn)) { /* Case 1: Fall through from above. * Case 2: When the transaction was initiated by me * (a single request MAD), there was an uncertainty * whether the reply will be RMPP. Now it's resolved, * since the reply is RMPP! */ status = osmv_txn_init_rmpp_receiver(h_bind, p_txn, is_init_by_peer); if (IB_SUCCESS != status) { goto dispatch_rmpp_mad_done; } } switch (osmv_txn_get_rmpp_state(p_txn)) { case OSMV_TXN_RMPP_RECEIVER: status = __osmv_dispatch_rmpp_rcv(h_bind, p_mad, p_txn, p_mad_addr); if (IB_SUCCESS != status) { if (FALSE == osmv_txn_is_rmpp_init_by_peer(p_txn)) { /* This is a requester, still waiting for the reply. Apply the callback */ /* update the status of the p_madw */ p_madw = osmv_txn_get_madw(p_txn); p_madw->status = status; p_bo->send_err_cb(p_bo->cb_context, p_madw); } /* ABORT/STOP/LOCAL ERROR */ osmv_txn_done(h_bind, osmv_txn_get_key(p_txn), FALSE); } break; case OSMV_TXN_RMPP_SENDER: __osmv_dispatch_rmpp_snd(h_bind, p_mad, p_txn, p_mad_addr); /* If an error happens here, it's the sender thread to cleanup the txn */ break; default: CL_ASSERT(FALSE); } dispatch_rmpp_mad_done: OSM_LOG_EXIT(p_bo->p_vendor->p_log); }
/***************************************************************************** This routine needs to be invoked on every send - since the SM LID and Local lid might change. To do that without any major performance impact we cache the results and the time they were obtained. Refresh only twice a minute. To avoid the need to use statics and risk a race - we require the refresh time to be stored in the context of the results. This also covers cases where we query for multiple guids. *****************************************************************************/ ib_api_status_t __osmv_get_lid_and_sm_lid_by_port_guid(IN osm_vendor_t * const p_vend, IN ib_net64_t port_guid, IN OUT uint64_t * p_lids_update_time_sec, OUT uint16_t * lid, OUT uint16_t * sm_lid) { ib_api_status_t status; ib_port_attr_t *p_attr_array; uint32_t num_ports; uint32_t port_num; OSM_LOG_ENTER(p_vend->p_log); /* use previous values if current time is close enough to previous query */ if (cl_get_time_stamp_sec() <= *p_lids_update_time_sec + 30) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "__osmv_get_lid_and_sm_lid_by_port_guid: " "Using previously stored lid:0x%04x sm_lid:0x%04x\n", *lid, *sm_lid); status = IB_SUCCESS; goto Exit; } /* obtain the number of available ports */ num_ports = 0; status = osm_vendor_get_all_port_attr(p_vend, NULL, &num_ports); if (status != IB_INSUFFICIENT_MEMORY) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "__osmv_get_lid_and_sm_lid_by_port_guid: ERR 0503: " "expected to get IB_INSUFFICIENT_MEMORY but got: %s\n", ib_get_err_str(status) ); status = IB_ERROR; goto Exit; } osm_log(p_vend->p_log, OSM_LOG_DEBUG, "__osmv_get_lid_and_sm_lid_by_port_guid: " "Found total of %u ports. Looking for guid:0x%016" PRIx64 "\n", num_ports, cl_ntoh64(port_guid) ); /* allocate the attributes */ p_attr_array = (ib_port_attr_t *) malloc(sizeof(ib_port_attr_t) * num_ports); if (p_attr_array == NULL) { status = IB_INSUFFICIENT_MEMORY; goto Exit; } /* obtain the attributes */ status = osm_vendor_get_all_port_attr(p_vend, p_attr_array, &num_ports); if (status != IB_SUCCESS) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "__osmv_get_lid_and_sm_lid_by_port_guid: ERR 0504: " "Fail to get port attributes (error: %s)\n", ib_get_err_str(status) ); free(p_attr_array); goto Exit; } status = IB_ERROR; /* find the port requested in the list */ for (port_num = 0; (port_num < num_ports) && (status == IB_ERROR); port_num++) { if (p_attr_array[port_num].port_guid == port_guid) { *lid = p_attr_array[port_num].lid; *sm_lid = p_attr_array[port_num].sm_lid; *p_lids_update_time_sec = cl_get_time_stamp_sec(); status = IB_SUCCESS; osm_log(p_vend->p_log, OSM_LOG_DEBUG, "__osmv_get_lid_and_sm_lid_by_port_guid: " "Found guid:0x%016" PRIx64 " with idx:%d\n", cl_ntoh64(port_guid), port_num); } } free(p_attr_array); Exit: OSM_LOG_EXIT(p_vend->p_log); return (status); }
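/*
 * Illustrative sketch (not OpenSM code): the refresh-on-timeout caching pattern
 * described in the comment above, with the timestamp kept in the caller's context
 * instead of a static so that concurrent callers querying different GUIDs do not
 * race. The names cached_lid_t and query_lid_from_fabric() are hypothetical
 * stand-ins for the real port-attribute query.
 */
#include <stdint.h>
#include <time.h>

typedef struct cached_lid {
	uint64_t update_time_sec;	/* when the cached value was last refreshed */
	uint16_t lid;			/* the cached result */
} cached_lid_t;

/* hypothetical expensive query, standing in for osm_vendor_get_all_port_attr() */
extern uint16_t query_lid_from_fabric(uint64_t port_guid);

static uint16_t get_lid_cached(cached_lid_t * c, uint64_t port_guid)
{
	uint64_t now = (uint64_t) time(NULL);

	/* reuse the previous answer if it is less than 30 seconds old */
	if (now <= c->update_time_sec + 30)
		return c->lid;

	c->lid = query_lid_from_fabric(port_guid);
	c->update_time_sec = now;
	return c->lid;
}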
/********************************************************************** Initiates a lightweight sweep of the subnet. Used during normal sweeps after the subnet is up. **********************************************************************/ static ib_api_status_t state_mgr_light_sweep_start(IN osm_sm_t * sm) { ib_api_status_t status = IB_SUCCESS; osm_bind_handle_t h_bind; cl_qmap_t *p_sw_tbl; cl_map_item_t *p_next; osm_node_t *p_node; osm_physp_t *p_physp; uint8_t port_num; OSM_LOG_ENTER(sm->p_log); p_sw_tbl = &sm->p_subn->sw_guid_tbl; /* * First, get the bind handle. */ h_bind = osm_sm_mad_ctrl_get_bind_handle(&sm->mad_ctrl); if (h_bind == OSM_BIND_INVALID_HANDLE) { OSM_LOG(sm->p_log, OSM_LOG_DEBUG, "No bound ports. Deferring sweep...\n"); status = IB_INVALID_STATE; goto _exit; } OSM_LOG_MSG_BOX(sm->p_log, OSM_LOG_VERBOSE, "INITIATING LIGHT SWEEP"); CL_PLOCK_ACQUIRE(sm->p_lock); cl_qmap_apply_func(p_sw_tbl, state_mgr_get_sw_info, sm); CL_PLOCK_RELEASE(sm->p_lock); CL_PLOCK_ACQUIRE(sm->p_lock); cl_qmap_apply_func(&sm->p_subn->node_guid_tbl, state_mgr_get_node_desc, sm); CL_PLOCK_RELEASE(sm->p_lock); /* now scan the list of physical ports that were not down but have no remote port */ CL_PLOCK_ACQUIRE(sm->p_lock); p_next = cl_qmap_head(&sm->p_subn->node_guid_tbl); while (p_next != cl_qmap_end(&sm->p_subn->node_guid_tbl)) { p_node = (osm_node_t *) p_next; p_next = cl_qmap_next(p_next); for (port_num = 1; port_num < osm_node_get_num_physp(p_node); port_num++) { p_physp = osm_node_get_physp_ptr(p_node, port_num); if (p_physp && (osm_physp_get_port_state(p_physp) != IB_LINK_DOWN) && !osm_physp_get_remote(p_physp)) { OSM_LOG(sm->p_log, OSM_LOG_ERROR, "ERR 3315: " "Unknown remote side for node 0x%016" PRIx64 " (%s) port %u. Adding to light sweep sampling list\n", cl_ntoh64(osm_node_get_node_guid (p_node)), p_node->print_desc, port_num); osm_dump_dr_path(sm->p_log, osm_physp_get_dr_path_ptr (p_physp), OSM_LOG_ERROR); state_mgr_get_remote_port_info(sm, p_physp); } } } cl_qmap_apply_func(&sm->p_subn->sm_guid_tbl, query_sm_info, sm); CL_PLOCK_RELEASE(sm->p_lock); _exit: OSM_LOG_EXIT(sm->p_log); return status; }
static int set_lft_block(IN osm_switch_t *p_sw, IN osm_ucast_mgr_t *p_mgr, IN uint16_t block_id_ho) { osm_madw_context_t context; osm_dr_path_t *p_path; osm_physp_t *p_physp; ib_api_status_t status; /* Send linear forwarding table blocks to the switch as long as the switch indicates it has blocks needing configuration. */ if (!p_sw->new_lft) { /* any routing should provide the new_lft */ CL_ASSERT(p_mgr->p_subn->opt.use_ucast_cache && p_mgr->cache_valid && !p_sw->need_update); return -1; } p_physp = osm_node_get_physp_ptr(p_sw->p_node, 0); if (!p_physp) return -1; p_path = osm_physp_get_dr_path_ptr(p_physp); context.lft_context.node_guid = osm_node_get_node_guid(p_sw->p_node); context.lft_context.set_method = TRUE; if (!p_sw->need_update && !p_mgr->p_subn->need_update && !memcmp(p_sw->new_lft + block_id_ho * IB_SMP_DATA_SIZE, p_sw->lft + block_id_ho * IB_SMP_DATA_SIZE, IB_SMP_DATA_SIZE)) return 0; /* * Zero the stored LFT block, so in case the MAD will end up * with error, we will resend it in the next sweep. */ memset(p_sw->lft + block_id_ho * IB_SMP_DATA_SIZE, 0, IB_SMP_DATA_SIZE); OSM_LOG(p_mgr->p_log, OSM_LOG_DEBUG, "Writing FT block %u to switch 0x%" PRIx64 "\n", block_id_ho, cl_ntoh64(context.lft_context.node_guid)); status = osm_req_set(p_mgr->sm, p_path, p_sw->new_lft + block_id_ho * IB_SMP_DATA_SIZE, IB_SMP_DATA_SIZE, IB_MAD_ATTR_LIN_FWD_TBL, cl_hton32(block_id_ho), FALSE, ib_port_info_get_m_key(&p_physp->port_info), CL_DISP_MSGID_NONE, &context); if (status != IB_SUCCESS) { OSM_LOG(p_mgr->p_log, OSM_LOG_ERROR, "ERR 3A10: " "Sending linear fwd. tbl. block failed (%s)\n", ib_get_err_str(status)); return -1; } return 0; }
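/*
 * Illustrative sketch: the block-level diffing idea used by set_lft_block() above.
 * A linear forwarding table is written to the switch in 64-byte SMP blocks
 * (IB_SMP_DATA_SIZE), and a block only needs to be (re)sent when its new content
 * differs from what the switch is believed to already hold. The helper name and
 * the local block-size macro are illustrative.
 */
#include <stdint.h>
#include <string.h>

#define LFT_BLOCK_SIZE 64	/* bytes per LinearForwardingTable block */

/* return nonzero if block 'block_id' differs between the old and new tables */
static int lft_block_needs_update(const uint8_t * old_lft, const uint8_t * new_lft,
				  uint16_t block_id)
{
	return memcmp(old_lft + (size_t) block_id * LFT_BLOCK_SIZE,
		      new_lft + (size_t) block_id * LFT_BLOCK_SIZE,
		      LFT_BLOCK_SIZE) != 0;
}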
osm_bind_handle_t osm_vendor_bind(IN osm_vendor_t * const p_vend, IN osm_bind_info_t * const p_user_bind, IN osm_mad_pool_t * const p_mad_pool, IN osm_vend_mad_recv_callback_t mad_recv_callback, IN osm_vend_mad_send_err_callback_t send_err_callback, IN void *context) { ib_net64_t port_guid; osm_al_bind_info_t *p_bind = 0; ib_api_status_t status; ib_qp_create_t qp_create; ib_mad_svc_t mad_svc; ib_av_attr_t av; OSM_LOG_ENTER(p_vend->p_log); CL_ASSERT(p_user_bind); CL_ASSERT(p_mad_pool); CL_ASSERT(mad_recv_callback); CL_ASSERT(send_err_callback); port_guid = p_user_bind->port_guid; osm_log(p_vend->p_log, OSM_LOG_INFO, "osm_vendor_bind: " "Binding to port 0x%" PRIx64 ".\n", cl_ntoh64(port_guid)); if (p_vend->h_ca == 0) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "osm_vendor_bind: " "Opening CA that owns port 0x%" PRIx64 ".\n", port_guid); status = __osm_vendor_open_ca(p_vend, port_guid); if (status != IB_SUCCESS) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 3B17: " "Unable to Open CA (%s).\n", ib_get_err_str(status)); goto Exit; } } p_bind = malloc(sizeof(*p_bind)); if (p_bind == NULL) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 3B18: " "Unable to allocate internal bind object.\n"); goto Exit; } memset(p_bind, 0, sizeof(*p_bind)); p_bind->p_vend = p_vend; p_bind->client_context = context; p_bind->port_num = osm_vendor_get_port_num(p_vend, port_guid); p_bind->rcv_callback = mad_recv_callback; p_bind->send_err_callback = send_err_callback; p_bind->p_osm_pool = p_mad_pool; CL_ASSERT(p_bind->port_num); /* Get the proper QP. */ memset(&qp_create, 0, sizeof(qp_create)); switch (p_user_bind->mad_class) { case IB_MCLASS_SUBN_LID: case IB_MCLASS_SUBN_DIR: qp_create.qp_type = IB_QPT_QP0_ALIAS; break; case IB_MCLASS_SUBN_ADM: default: qp_create.qp_type = IB_QPT_QP1_ALIAS; break; } qp_create.sq_depth = p_user_bind->send_q_size; qp_create.rq_depth = p_user_bind->recv_q_size; qp_create.sq_sge = OSM_AL_SQ_SGE; qp_create.rq_sge = OSM_AL_RQ_SGE; status = ib_get_spl_qp(p_vend->h_pd, port_guid, &qp_create, p_bind, __osm_al_err_callback, &p_bind->pool_key, &p_bind->h_qp); if (status != IB_SUCCESS) { free(p_bind); osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 3B19: " "Unable to get QP handle (%s).\n", ib_get_err_str(status)); goto Exit; } CL_ASSERT(p_bind->h_qp); CL_ASSERT(p_bind->pool_key); memset(&mad_svc, 0, sizeof(mad_svc)); mad_svc.mad_svc_context = p_bind; mad_svc.pfn_mad_send_cb = __osm_al_send_callback; mad_svc.pfn_mad_recv_cb = __osm_al_rcv_callback; mad_svc.mgmt_class = p_user_bind->mad_class; mad_svc.mgmt_version = p_user_bind->class_version; mad_svc.support_unsol = p_user_bind->is_responder; mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE; mad_svc.method_array[IB_MAD_METHOD_SET] = TRUE; mad_svc.method_array[IB_MAD_METHOD_DELETE] = TRUE; mad_svc.method_array[IB_MAD_METHOD_TRAP] = TRUE; mad_svc.method_array[IB_MAD_METHOD_GETTABLE] = TRUE; status = ib_reg_mad_svc(p_bind->h_qp, &mad_svc, &p_bind->h_svc); if (status != IB_SUCCESS) { free(p_bind); osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 3B21: " "Unable to register QP0 MAD service (%s).\n", ib_get_err_str(status)); goto Exit; } __osm_vendor_init_av(p_bind, &av); status = ib_create_av(p_vend->h_pd, &av, &p_bind->h_dr_av); if (status != IB_SUCCESS) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 3B22: " "Unable to create address vector (%s).\n", ib_get_err_str(status)); goto Exit; } if (osm_log_is_active(p_vend->p_log, OSM_LOG_DEBUG)) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, 
"osm_vendor_bind: " "Allocating av handle %p.\n", p_bind->h_dr_av); } Exit: OSM_LOG_EXIT(p_vend->p_log); return ((osm_bind_handle_t) p_bind); }
void osm_pkey_rec_rcv_process(IN void *ctx, IN void *data) { osm_sa_t *sa = ctx; osm_madw_t *p_madw = data; const ib_sa_mad_t *p_rcvd_mad; const ib_pkey_table_record_t *p_rcvd_rec; const osm_port_t *p_port = NULL; cl_qlist_t rec_list; osm_pkey_search_ctxt_t context; ib_net64_t comp_mask; osm_physp_t *p_req_physp; CL_ASSERT(sa); OSM_LOG_ENTER(sa->p_log); CL_ASSERT(p_madw); p_rcvd_mad = osm_madw_get_sa_mad_ptr(p_madw); p_rcvd_rec = (ib_pkey_table_record_t *) ib_sa_mad_get_payload_ptr(p_rcvd_mad); comp_mask = p_rcvd_mad->comp_mask; CL_ASSERT(p_rcvd_mad->attr_id == IB_MAD_ATTR_PKEY_TBL_RECORD); /* we only support SubnAdmGet and SubnAdmGetTable methods */ if (p_rcvd_mad->method != IB_MAD_METHOD_GET && p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4605: " "Unsupported Method (%s)\n", ib_get_sa_method_str(p_rcvd_mad->method)); osm_sa_send_error(sa, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR); goto Exit; } /* p922 - P_KeyTableRecords shall only be provided in response to trusted requests. Check that the requester is a trusted one. */ if (p_rcvd_mad->sm_key != sa->p_subn->opt.sa_key) { /* This is not a trusted requester! */ OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4608: " "Request from non-trusted requester: " "Given SM_Key:0x%016" PRIx64 "\n", cl_ntoh64(p_rcvd_mad->sm_key)); osm_sa_send_error(sa, p_madw, IB_SA_MAD_STATUS_REQ_INVALID); goto Exit; } /* update the requester physical port */ p_req_physp = osm_get_physp_by_mad_addr(sa->p_log, sa->p_subn, osm_madw_get_mad_addr_ptr (p_madw)); if (p_req_physp == NULL) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4604: " "Cannot find requester physical port\n"); goto Exit; } OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Requester port GUID 0x%" PRIx64 "\n", cl_ntoh64(osm_physp_get_port_guid(p_req_physp))); cl_qlist_init(&rec_list); context.p_rcvd_rec = p_rcvd_rec; context.p_list = &rec_list; context.comp_mask = p_rcvd_mad->comp_mask; context.sa = sa; context.block_num = cl_ntoh16(p_rcvd_rec->block_num); context.p_req_physp = p_req_physp; OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Got Query Lid:%u(%02X), Block:0x%02X(%02X), Port:0x%02X(%02X)\n", cl_ntoh16(p_rcvd_rec->lid), (comp_mask & IB_PKEY_COMPMASK_LID) != 0, p_rcvd_rec->port_num, (comp_mask & IB_PKEY_COMPMASK_PORT) != 0, context.block_num, (comp_mask & IB_PKEY_COMPMASK_BLOCK) != 0); cl_plock_acquire(sa->p_lock); /* If the user specified a LID, it obviously narrows our work load, since we don't have to search every port */ if (comp_mask & IB_PKEY_COMPMASK_LID) { p_port = osm_get_port_by_lid(sa->p_subn, p_rcvd_rec->lid); if (!p_port) OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 460B: " "No port found with LID %u\n", cl_ntoh16(p_rcvd_rec->lid)); else sa_pkey_by_comp_mask(sa, p_port, &context); } else cl_qmap_apply_func(&sa->p_subn->port_guid_tbl, sa_pkey_by_comp_mask_cb, &context); cl_plock_release(sa->p_lock); osm_sa_respond(sa, p_madw, sizeof(ib_pkey_table_record_t), &rec_list); Exit: OSM_LOG_EXIT(sa->p_log); }
static ib_api_status_t __osmv_get_send_txn(IN osm_bind_handle_t h_bind, IN osm_madw_t * const p_madw, IN boolean_t is_rmpp, IN boolean_t resp_expected, OUT osmv_txn_ctx_t ** pp_txn) { ib_api_status_t ret; uint64_t tid, key; osmv_bind_obj_t *p_bo = (osmv_bind_obj_t *) h_bind; ib_mad_t *p_mad = osm_madw_get_mad_ptr(p_madw); OSM_LOG_ENTER(p_bo->p_vendor->p_log); CL_ASSERT(NULL != pp_txn); key = tid = cl_ntoh64(p_mad->trans_id); if (TRUE == resp_expected) { /* Create a unique identifier at the requester side */ key = osmv_txn_uniq_key(tid); } /* We must run under a transaction framework */ ret = osmv_txn_lookup(h_bind, key, pp_txn); if (IB_NOT_FOUND == ret) { /* Generally, we start a new transaction */ ret = osmv_txn_init(h_bind, tid, key, pp_txn); if (IB_SUCCESS != ret) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "__osmv_get_send_txn: ERR 7313: " "The transaction id=0x%" PRIx64 " failed to init.\n", tid); goto get_send_txn_done; } } else { CL_ASSERT(NULL != *pp_txn); /* The transaction context exists. * This is legal only if I am going to return an * (RMPP?) reply to an RMPP request sent by the other party * (double-sided RMPP transfer) */ if (FALSE == is_rmpp || FALSE == osmv_txn_is_rmpp_init_by_peer(*pp_txn)) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "__osmv_get_send_txn: ERR 7314: " "The transaction id=0x%" PRIx64 " is not unique. Send failed.\n", tid); ret = IB_INVALID_SETTING; goto get_send_txn_done; } if (TRUE == resp_expected) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "__osmv_get_send_txn: ERR 7315: " "The transaction id=0x%" PRIx64 " can't expect a response. Send failed.\n", tid); ret = IB_INVALID_PARAMETER; goto get_send_txn_done; } } if (TRUE == is_rmpp) { ret = osmv_txn_init_rmpp_sender(h_bind, *pp_txn, p_madw); if (IB_SUCCESS != ret) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "__osmv_get_send_txn: ERR 7316: " "The transaction id=0x%" PRIx64 " failed to init the rmpp mad. Send failed.\n", tid); osmv_txn_done(h_bind, tid, FALSE); goto get_send_txn_done; } } /* Save a reference to the MAD in the txn context * We'll need to match it in two cases: * (1) When the response is returned, if I am the requester * (2) In RMPP retransmissions */ osmv_txn_set_madw(*pp_txn, p_madw); get_send_txn_done: OSM_LOG_EXIT(p_bo->p_vendor->p_log); return ret; }
ib_api_status_t osmv_transport_mad_send(IN const osm_bind_handle_t h_bind, IN void *p_mad, IN const osm_mad_addr_t * p_mad_addr) { osmv_bind_obj_t *p_bo = (osmv_bind_obj_t *) h_bind; osm_vendor_t const *p_vend = p_bo->p_vendor; int ret; ibms_mad_msg_t mad_msg; ib_api_status_t status; const ib_mad_t *p_mad_hdr = p_mad; OSM_LOG_ENTER(p_vend->p_log); memset(&mad_msg, 0, sizeof(mad_msg)); /* Make sure the p_bo object is still relevant */ if ((p_bo->magic_ptr != p_bo) || p_bo->is_closing) return IB_INVALID_CALLBACK; /* * Copy the MAD over to the sent mad */ memcpy(&mad_msg.header, p_mad_hdr, MAD_BLOCK_SIZE); /* * For all sends other than directed route SM MADs, * acquire an address vector for the destination. */ if (p_mad_hdr->mgmt_class != IB_MCLASS_SUBN_DIR) { __osmv_ibms_osm_addr_to_mad_addr(p_mad_addr, p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_LID, &mad_msg.addr); } else { /* is a directed route - we need to construct a permissive address */ /* we do not need port number since it is part of the mad_hndl */ mad_msg.addr.dlid = IB_LID_PERMISSIVE; mad_msg.addr.slid = IB_LID_PERMISSIVE; mad_msg.addr.sqpn = 0; mad_msg.addr.dqpn = 0; } osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "osmv_transport_mad_send: " "Sending QPN:%d DLID:0x%04x class:0x%02x " "method:0x%02x attr:0x%04x status:0x%04x " "tid:0x%016" PRIx64 "\n", mad_msg.addr.dqpn, cl_ntoh16(mad_msg.addr.dlid), mad_msg.header.mgmt_class, mad_msg.header.method, cl_ntoh16(mad_msg.header.attr_id), cl_ntoh16(mad_msg.header.status), cl_ntoh64(mad_msg.header.trans_id) ); /* send it */ ret = ibms_send(((osmv_ibms_transport_mgr_t *) (p_bo->p_transp_mgr))-> conHdl, &mad_msg); if (ret) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osmv_transport_mad_send: ERR 5304: " "Error sending mad (%d).\n", ret); status = IB_ERROR; goto Exit; } status = IB_SUCCESS; Exit: OSM_LOG_EXIT(p_vend->p_log); return (status); }
static int do_ucast_file_load(void *context) { char line[1024]; char *file_name; FILE *file; ib_net64_t sw_guid, port_guid; osm_opensm_t *p_osm = context; osm_switch_t *p_sw; uint16_t lid; uint8_t port_num; unsigned lineno; file_name = p_osm->subn.opt.lfts_file; if (!file_name) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "LFTs file name is not given; " "using default routing algorithm\n"); return 1; } file = fopen(file_name, "r"); if (!file) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR | OSM_LOG_SYS, "ERR 6302: " "cannot open ucast dump file \'%s\': %m\n", file_name); return -1; } lineno = 0; p_sw = NULL; while (fgets(line, sizeof(line) - 1, file) != NULL) { char *p, *q; lineno++; p = line; while (isspace(*p)) p++; if (*p == '#') continue; if (!strncmp(p, "Multicast mlids", 15)) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR | OSM_LOG_SYS, "ERR 6303: " "Multicast dump file detected; " "skipping parsing. Using default " "routing algorithm\n"); } else if (!strncmp(p, "Unicast lids", 12)) { if (p_sw) osm_ucast_mgr_set_fwd_table(&p_osm->sm. ucast_mgr, p_sw); q = strstr(p, " guid 0x"); if (!q) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse switch definition\n", file_name, lineno); return -1; } p = q + 8; sw_guid = strtoull(p, &q, 16); if (q == p || !isspace(*q)) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse switch guid: \'%s\'\n", file_name, lineno, p); return -1; } sw_guid = cl_hton64(sw_guid); p_sw = osm_get_switch_by_guid(&p_osm->subn, sw_guid); if (!p_sw) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "cannot find switch %016" PRIx64 "\n", cl_ntoh64(sw_guid)); continue; } memset(p_sw->new_lft, OSM_NO_PATH, IB_LID_UCAST_END_HO + 1); } else if (p_sw && !strncmp(p, "0x", 2)) { p += 2; lid = (uint16_t) strtoul(p, &q, 16); if (q == p || !isspace(*q)) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse lid: \'%s\'\n", file_name, lineno, p); return -1; } p = q; while (isspace(*p)) p++; port_num = (uint8_t) strtoul(p, &q, 10); if (q == p || !isspace(*q)) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse port: \'%s\'\n", file_name, lineno, p); return -1; } p = q; /* additionally try to extract guid */ q = strstr(p, " portguid 0x"); if (!q) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "PARSE WARNING: %s:%u: " "cannot find port guid " "(maybe broken dump): \'%s\'\n", file_name, lineno, p); port_guid = 0; } else { p = q + 12; port_guid = strtoull(p, &q, 16); if (q == p || (!isspace(*q) && *q != ':')) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "PARSE WARNING: %s:%u: " "cannot parse port guid " "(maybe broken dump): \'%s\'\n", file_name, lineno, p); port_guid = 0; } } port_guid = cl_hton64(port_guid); add_path(p_osm, p_sw, lid, port_num, port_guid); } } if (p_sw) osm_ucast_mgr_set_fwd_table(&p_osm->sm.ucast_mgr, p_sw); fclose(file); return 0; }
/* rank is a SWITCH for BFS purpose */ static int updn_subn_rank(IN updn_t * p_updn) { osm_switch_t *p_sw; osm_physp_t *p_physp, *p_remote_physp; cl_qlist_t list; cl_map_item_t *item; struct updn_node *u, *remote_u; uint8_t num_ports, port_num; osm_log_t *p_log = &p_updn->p_osm->log; unsigned max_rank = 0; OSM_LOG_ENTER(p_log); cl_qlist_init(&list); /* add all roots to the list */ for (item = cl_qmap_head(&p_updn->p_osm->subn.sw_guid_tbl); item != cl_qmap_end(&p_updn->p_osm->subn.sw_guid_tbl); item = cl_qmap_next(item)) { p_sw = (osm_switch_t *)item; u = p_sw->priv; if (!u->rank) cl_qlist_insert_tail(&list, &u->list); } /* BFS the list till it's empty */ while (!cl_is_qlist_empty(&list)) { u = (struct updn_node *)cl_qlist_remove_head(&list); /* Go over all remote nodes and rank them (if not already visited) */ p_sw = u->sw; num_ports = p_sw->num_ports; OSM_LOG(p_log, OSM_LOG_DEBUG, "Handling switch GUID 0x%" PRIx64 "\n", cl_ntoh64(osm_node_get_node_guid(p_sw->p_node))); for (port_num = 1; port_num < num_ports; port_num++) { ib_net64_t port_guid; /* Current port fetched in order to get remote side */ p_physp = osm_node_get_physp_ptr(p_sw->p_node, port_num); if (!p_physp) continue; p_remote_physp = p_physp->p_remote_physp; /* make sure that all the following occur on p_remote_physp: 1. The port isn't NULL 2. It is a switch */ if (p_remote_physp && p_remote_physp->p_node->sw) { remote_u = p_remote_physp->p_node->sw->priv; port_guid = p_remote_physp->port_guid; if (remote_u->rank > u->rank + 1) { remote_u->rank = u->rank + 1; max_rank = remote_u->rank; cl_qlist_insert_tail(&list, &remote_u->list); OSM_LOG(p_log, OSM_LOG_DEBUG, "Rank of port GUID 0x%" PRIx64 " = %u\n", cl_ntoh64(port_guid), remote_u->rank); } } } } /* Print Summary of ranking */ OSM_LOG(p_log, OSM_LOG_VERBOSE, "Subnet ranking completed. Max Node Rank = %d\n", max_rank); OSM_LOG_EXIT(p_log); return 0; }
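/*
 * Illustrative sketch: the ranking rule applied by updn_subn_rank() above, reduced
 * to a plain array-based BFS. Root switches start at rank 0 and every switch
 * reached over a link is lowered to (neighbor rank + 1) if that improves it.
 * All names and the adjacency-matrix representation are illustrative only.
 */
#include <stdint.h>

#define MAX_SW 64
#define NO_RANK 0xffffffffu

static void rank_by_bfs(const uint8_t adj[MAX_SW][MAX_SW], unsigned num_sw,
			const unsigned *roots, unsigned num_roots,
			uint32_t rank[MAX_SW])
{
	unsigned queue[MAX_SW], head = 0, tail = 0, i, j;

	for (i = 0; i < num_sw; i++)
		rank[i] = NO_RANK;
	for (i = 0; i < num_roots; i++) {
		rank[roots[i]] = 0;	/* roots are rank 0 */
		queue[tail++] = roots[i];
	}
	while (head < tail) {
		i = queue[head++];
		for (j = 0; j < num_sw; j++)
			if (adj[i][j] && rank[j] > rank[i] + 1) {
				rank[j] = rank[i] + 1;	/* one level below its parent */
				queue[tail++] = j;
			}
	}
}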
/* Find Root nodes automatically by Min Hop Table info */ static void updn_find_root_nodes_by_min_hop(OUT updn_t * p_updn) { osm_opensm_t *p_osm = p_updn->p_osm; osm_switch_t *p_sw; osm_port_t *p_port; osm_physp_t *p_physp; cl_map_item_t *item; double thd1, thd2; unsigned i, cas_num = 0; unsigned *cas_per_sw; uint16_t lid_ho; OSM_LOG_ENTER(&p_osm->log); OSM_LOG(&p_osm->log, OSM_LOG_DEBUG, "Current number of ports in the subnet is %d\n", cl_qmap_count(&p_osm->subn.port_guid_tbl)); lid_ho = (uint16_t) cl_ptr_vector_get_size(&p_updn->p_osm->subn.port_lid_tbl) + 1; cas_per_sw = malloc(lid_ho * sizeof(*cas_per_sw)); if (!cas_per_sw) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "ERR AA14: " "cannot alloc mem for CAs per switch counter array\n"); goto _exit; } memset(cas_per_sw, 0, lid_ho * sizeof(*cas_per_sw)); /* Find the Maximum number of CAs (and routers) for histogram normalization */ OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "Finding the number of CAs and storing them in cl_map\n"); for (item = cl_qmap_head(&p_updn->p_osm->subn.port_guid_tbl); item != cl_qmap_end(&p_updn->p_osm->subn.port_guid_tbl); item = cl_qmap_next(item)) { p_port = (osm_port_t *)item; if (!p_port->p_node->sw) { p_physp = p_port->p_physp->p_remote_physp; if (!p_physp || !p_physp->p_node->sw) continue; lid_ho = osm_node_get_base_lid(p_physp->p_node, 0); lid_ho = cl_ntoh16(lid_ho); cas_per_sw[lid_ho]++; cas_num++; } } thd1 = cas_num * 0.9; thd2 = cas_num * 0.05; OSM_LOG(&p_osm->log, OSM_LOG_DEBUG, "Found %u CAs and RTRs, %u SWs in the subnet. " "Thresholds are thd1 = %f && thd2 = %f\n", cas_num, cl_qmap_count(&p_osm->subn.sw_guid_tbl), thd1, thd2); OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "Passing through all switches to collect Min Hop info\n"); for (item = cl_qmap_head(&p_updn->p_osm->subn.sw_guid_tbl); item != cl_qmap_end(&p_updn->p_osm->subn.sw_guid_tbl); item = cl_qmap_next(item)) { unsigned hop_hist[IB_SUBNET_PATH_HOPS_MAX]; uint16_t max_lid_ho; uint8_t hop_val; uint16_t numHopBarsOverThd1 = 0; uint16_t numHopBarsOverThd2 = 0; p_sw = (osm_switch_t *) item; memset(hop_hist, 0, sizeof(hop_hist)); max_lid_ho = p_sw->max_lid_ho; for (lid_ho = 1; lid_ho <= max_lid_ho; lid_ho++) if (cas_per_sw[lid_ho]) { hop_val = osm_switch_get_least_hops(p_sw, lid_ho); if (hop_val >= IB_SUBNET_PATH_HOPS_MAX) continue; hop_hist[hop_val] += cas_per_sw[lid_ho]; } /* Now recognize the spines by requiring one bar to be above 90% of the number of CAs and RTRs */ for (i = 0; i < IB_SUBNET_PATH_HOPS_MAX; i++) { if (hop_hist[i] > thd1) numHopBarsOverThd1++; if (hop_hist[i] > thd2) numHopBarsOverThd2++; } /* If thd conditions are valid - rank the root node */ if (numHopBarsOverThd1 == 1 && numHopBarsOverThd2 == 1) { OSM_LOG(&p_osm->log, OSM_LOG_DEBUG, "Ranking GUID 0x%" PRIx64 " as root node\n", cl_ntoh64(osm_node_get_node_guid(p_sw->p_node))); ((struct updn_node *)p_sw->priv)->rank = 0; p_updn->num_roots++; } } free(cas_per_sw); _exit: OSM_LOG_EXIT(&p_osm->log); return; }
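/*
 * Illustrative sketch: the spine-detection heuristic used above, in isolation.
 * For one switch, hop_hist[h] counts how many CAs are exactly h hops away; the
 * switch is treated as a root when exactly one bar of the histogram exceeds 90%
 * of all CAs (thd1) and no second bar exceeds 5% (thd2). MAX_HOPS stands in for
 * IB_SUBNET_PATH_HOPS_MAX; all names here are illustrative.
 */
#define MAX_HOPS 16

static int looks_like_root(const unsigned hop_hist[MAX_HOPS], unsigned total_cas)
{
	double thd1 = total_cas * 0.9;	/* "almost all CAs sit at one distance" */
	double thd2 = total_cas * 0.05;	/* "and no significant second distance" */
	unsigned i, over_thd1 = 0, over_thd2 = 0;

	for (i = 0; i < MAX_HOPS; i++) {
		if (hop_hist[i] > thd1)
			over_thd1++;
		if (hop_hist[i] > thd2)
			over_thd2++;
	}
	return over_thd1 == 1 && over_thd2 == 1;
}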
/********************************************************************** * This function does the bfs of min hop table calculation by guid index * as a starting point. **********************************************************************/ static int updn_bfs_by_node(IN osm_log_t * p_log, IN osm_subn_t * p_subn, IN osm_switch_t * p_sw) { uint8_t pn, pn_rem; cl_qlist_t list; uint16_t lid; struct updn_node *u; updn_switch_dir_t next_dir, current_dir; OSM_LOG_ENTER(p_log); lid = osm_node_get_base_lid(p_sw->p_node, 0); lid = cl_ntoh16(lid); osm_switch_set_hops(p_sw, lid, 0, 0); OSM_LOG(p_log, OSM_LOG_DEBUG, "Starting from switch - port GUID 0x%" PRIx64 " lid %u\n", cl_ntoh64(p_sw->p_node->node_info.port_guid), lid); u = p_sw->priv; u->dir = UP; /* Update list with the new element */ cl_qlist_init(&list); cl_qlist_insert_tail(&list, &u->list); /* BFS the list till no next element */ while (!cl_is_qlist_empty(&list)) { u = (struct updn_node *)cl_qlist_remove_head(&list); u->visited = 0; /* cleanup */ current_dir = u->dir; /* Go over all ports of the switch and find unvisited remote nodes */ for (pn = 1; pn < u->sw->num_ports; pn++) { osm_node_t *p_remote_node; struct updn_node *rem_u; uint8_t current_min_hop, remote_min_hop, set_hop_return_value; osm_switch_t *p_remote_sw; p_remote_node = osm_node_get_remote_node(u->sw->p_node, pn, &pn_rem); /* If no remote node OR remote node is not a SWITCH continue to next pn */ if (!p_remote_node || !p_remote_node->sw) continue; /* Fetch remote guid only after validation of remote node */ p_remote_sw = p_remote_node->sw; rem_u = p_remote_sw->priv; /* Decide which direction to mark it (UP/DOWN) */ next_dir = updn_get_dir(u->rank, rem_u->rank, u->id, rem_u->id); /* Check if this is a legal step : the only illegal step is going from DOWN to UP */ if ((current_dir == DOWN) && (next_dir == UP)) { OSM_LOG(p_log, OSM_LOG_DEBUG, "Avoiding move from 0x%016" PRIx64 " to 0x%016" PRIx64 "\n", cl_ntoh64(osm_node_get_node_guid(u->sw->p_node)), cl_ntoh64(osm_node_get_node_guid(p_remote_node))); /* Illegal step */ continue; } /* Set MinHop value for the current lid */ current_min_hop = osm_switch_get_least_hops(u->sw, lid); /* Check hop count if better insert into list && update the remote node Min Hop Table */ remote_min_hop = osm_switch_get_hop_count(p_remote_sw, lid, pn_rem); if (current_min_hop + 1 < remote_min_hop) { set_hop_return_value = osm_switch_set_hops(p_remote_sw, lid, pn_rem, current_min_hop + 1); if (set_hop_return_value) { OSM_LOG(p_log, OSM_LOG_ERROR, "ERR AA01: " "Invalid value returned from set min hop is: %d\n", set_hop_return_value); } /* Check if remote port has already been visited */ if (!rem_u->visited) { /* Insert updn_switch item into the list */ rem_u->dir = next_dir; rem_u->visited = 1; cl_qlist_insert_tail(&list, &rem_u->list); } } } } OSM_LOG_EXIT(p_log); return 0; }
void osm_lftr_rcv_process(IN void *ctx, IN void *data) { osm_sa_t *sa = ctx; osm_madw_t *p_madw = data; const ib_sa_mad_t *p_rcvd_mad; const ib_lft_record_t *p_rcvd_rec; cl_qlist_t rec_list; osm_lftr_search_ctxt_t context; osm_physp_t *p_req_physp; CL_ASSERT(sa); OSM_LOG_ENTER(sa->p_log); CL_ASSERT(p_madw); p_rcvd_mad = osm_madw_get_sa_mad_ptr(p_madw); p_rcvd_rec = (ib_lft_record_t *) ib_sa_mad_get_payload_ptr(p_rcvd_mad); CL_ASSERT(p_rcvd_mad->attr_id == IB_MAD_ATTR_LFT_RECORD); /* we only support SubnAdmGet and SubnAdmGetTable methods */ if (p_rcvd_mad->method != IB_MAD_METHOD_GET && p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4408: " "Unsupported Method (%s)\n", ib_get_sa_method_str(p_rcvd_mad->method)); osm_sa_send_error(sa, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR); goto Exit; } /* update the requester physical port */ p_req_physp = osm_get_physp_by_mad_addr(sa->p_log, sa->p_subn, osm_madw_get_mad_addr_ptr (p_madw)); if (p_req_physp == NULL) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4407: " "Cannot find requester physical port\n"); goto Exit; } OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Requester port GUID 0x%" PRIx64 "\n", cl_ntoh64(osm_physp_get_port_guid(p_req_physp))); cl_qlist_init(&rec_list); context.p_rcvd_rec = p_rcvd_rec; context.p_list = &rec_list; context.comp_mask = p_rcvd_mad->comp_mask; context.sa = sa; context.p_req_physp = p_req_physp; cl_plock_acquire(sa->p_lock); /* Go over all switches */ cl_qmap_apply_func(&sa->p_subn->sw_guid_tbl, lftr_rcv_by_comp_mask, &context); cl_plock_release(sa->p_lock); osm_sa_respond(sa, p_madw, sizeof(ib_lft_record_t), &rec_list); Exit: OSM_LOG_EXIT(sa->p_log); }
static ib_api_status_t __osmv_dispatch_rmpp_rcv(IN osm_bind_handle_t h_bind, IN const ib_mad_t * p_mad, IN osmv_txn_ctx_t * p_txn, IN const osm_mad_addr_t * p_mad_addr) { ib_api_status_t status = IB_SUCCESS; osmv_rmpp_recv_ctx_t *p_recv_ctx = osmv_txn_get_rmpp_recv_ctx(p_txn); osmv_bind_obj_t *p_bo = (osmv_bind_obj_t *) h_bind; boolean_t is_last1 = FALSE, is_last2 = FALSE; osm_madw_t *p_new_madw = NULL, *p_req_madw = NULL; ib_mad_t *p_mad_buf; uint32_t size = 0; uint64_t key = osmv_txn_get_key(p_txn); uint64_t tid = osmv_txn_get_tid(p_txn); OSM_LOG_ENTER(p_bo->p_vendor->p_log); if (TRUE == osmv_rmpp_is_ack(p_mad)) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "Not supposed to receive ACK's --> dropping the MAD\n"); goto dispatch_rmpp_rcv_done; } if (TRUE == osmv_rmpp_is_abort_stop(p_mad)) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "__osmv_dispatch_rmpp_rcv: ERR 6504: " "The Remote Side stopped sending\n"); status = IB_REMOTE_ERROR; goto dispatch_rmpp_rcv_done; } status = __osmv_dispatch_accept_seg(h_bind, p_txn, p_mad); switch (status) { case IB_SUCCESS: /* Check whether this is the legal last MAD */ /* Criteria #1: the received MAD is marked last */ is_last1 = osmv_rmpp_is_last(p_mad); /* Criteria #2: the total accumulated length hits the advertised one */ is_last2 = is_last1; size = osmv_rmpp_recv_ctx_get_byte_num_from_first(p_recv_ctx); if (size > 0) { is_last2 = (osmv_rmpp_recv_ctx_get_cur_byte_num(p_recv_ctx) >= size); } if (is_last1 != is_last2) { osmv_rmpp_send_nak(h_bind, p_mad, p_mad_addr, IB_RMPP_TYPE_ABORT, IB_RMPP_STATUS_BAD_LEN); status = IB_ERROR; goto dispatch_rmpp_rcv_done; } /* TBD Consider an optimization - sending an ACK * only for the last segment in the window */ __osmv_dispatch_send_ack(h_bind, p_mad, p_txn, p_mad_addr); break; case IB_INSUFFICIENT_RESOURCES: /* An out-of-order segment was received. Send the ACK anyway */ __osmv_dispatch_send_ack(h_bind, p_mad, p_txn, p_mad_addr); status = IB_SUCCESS; goto dispatch_rmpp_rcv_done; case IB_INSUFFICIENT_MEMORY: osmv_rmpp_send_nak(h_bind, p_mad, p_mad_addr, IB_RMPP_TYPE_STOP, IB_RMPP_STATUS_RESX); goto dispatch_rmpp_rcv_done; default: /* Illegal return code */ CL_ASSERT(FALSE); } if (TRUE != is_last1) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "RMPP MADW assembly continues, TID=0x%" PRIx64 "\n", tid); goto dispatch_rmpp_rcv_done; } /* This is the last packet. */ if (0 == size) { /* The total size was not advertised in the first packet */ size = osmv_rmpp_recv_ctx_get_byte_num_from_last(p_recv_ctx); } /* NOTE: the received mad might not be >= 256 bytes. some MADs might contain several SA records but still be less than a full MAD. We have to use RMPP to send them over since on a regular "simple" MAD there is no way to know how many records were sent */ /* Build the MAD wrapper to be returned to the user. * The actual storage for the MAD is allocated there.
*/ p_new_madw = osm_mad_pool_get(p_bo->p_osm_pool, h_bind, size, p_mad_addr); if (NULL == p_new_madw) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "__osmv_dispatch_rmpp_rcv: ERR 6506: " "Out Of Memory - could not allocate %d bytes for the MADW\n", size); status = IB_INSUFFICIENT_MEMORY; goto dispatch_rmpp_rcv_done; } p_req_madw = osmv_txn_get_madw(p_txn); p_mad_buf = osm_madw_get_mad_ptr(p_new_madw); status = osmv_rmpp_recv_ctx_reassemble_arbt_mad(p_recv_ctx, size, (uint8_t *) p_mad_buf); if (IB_SUCCESS != status) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "__osmv_dispatch_rmpp_rcv: ERR 6507: " "Internal error - could not reassemble the result MAD\n"); goto dispatch_rmpp_rcv_done; /* What can happen here? */ } /* The MAD is assembled, we are about to apply the callback. * Delete the transaction context, unless the transaction is double sided */ if (FALSE == osmv_txn_is_rmpp_init_by_peer(p_txn) || FALSE == osmv_mad_is_multi_resp(p_mad)) { osmv_txn_done(h_bind, key, FALSE); } osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "RMPP MADW %p assembly complete, TID=0x%" PRIx64 "\n", p_new_madw, tid); p_mad_buf->trans_id = cl_hton64(tid); osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "Restoring the original TID to 0x%" PRIx64 "\n", cl_ntoh64(p_mad_buf->trans_id)); /* Finally, do the job! */ p_bo->recv_cb(p_new_madw, p_bo->cb_context, p_req_madw); dispatch_rmpp_rcv_done: OSM_LOG_EXIT(p_bo->p_vendor->p_log); return status; }
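/*
 * Illustrative sketch: the two "last segment" criteria checked in
 * __osmv_dispatch_rmpp_rcv() above must agree - the segment carries the RMPP
 * "last" flag, and (when a total length was advertised in the first segment)
 * the bytes accumulated so far reach that total. A disagreement is what triggers
 * the ABORT/BAD_LEN path. This helper is a generic restatement, not OpenSM API.
 */
#include <stdint.h>

/* returns 1 when the transfer is complete, 0 when more segments are expected,
 * and -1 on a length mismatch that should abort the transfer */
static int rmpp_check_last(int last_flag_set, uint32_t advertised_total,
			   uint32_t accumulated_bytes)
{
	int length_says_last = last_flag_set;

	if (advertised_total > 0)
		length_says_last = (accumulated_bytes >= advertised_total);

	if (last_flag_set != length_says_last)
		return -1;	/* flag and byte count disagree: ABORT with BAD_LEN */

	return last_flag_set ? 1 : 0;
}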
void osm_smir_rcv_process(IN void *ctx, IN void *data) { osm_sa_t *sa = ctx; osm_madw_t *p_madw = data; const ib_sa_mad_t *sad_mad; const ib_sminfo_record_t *p_rcvd_rec; const osm_port_t *p_port = NULL; const ib_sm_info_t *p_smi; cl_qlist_t rec_list; osm_smir_search_ctxt_t context; ib_api_status_t status = IB_SUCCESS; ib_net64_t comp_mask; ib_net64_t port_guid; osm_physp_t *p_req_physp; osm_port_t *local_port; osm_remote_sm_t *p_rem_sm; cl_qmap_t *p_sm_guid_tbl; uint8_t pri_state; CL_ASSERT(sa); OSM_LOG_ENTER(sa->p_log); CL_ASSERT(p_madw); sad_mad = osm_madw_get_sa_mad_ptr(p_madw); p_rcvd_rec = (ib_sminfo_record_t *) ib_sa_mad_get_payload_ptr(sad_mad); comp_mask = sad_mad->comp_mask; CL_ASSERT(sad_mad->attr_id == IB_MAD_ATTR_SMINFO_RECORD); /* we only support SubnAdmGet and SubnAdmGetTable methods */ if (sad_mad->method != IB_MAD_METHOD_GET && sad_mad->method != IB_MAD_METHOD_GETTABLE) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 2804: " "Unsupported Method (%s)\n", ib_get_sa_method_str(sad_mad->method)); osm_sa_send_error(sa, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR); goto Exit; } /* update the requester physical port */ p_req_physp = osm_get_physp_by_mad_addr(sa->p_log, sa->p_subn, osm_madw_get_mad_addr_ptr (p_madw)); if (p_req_physp == NULL) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 2803: " "Cannot find requester physical port\n"); goto Exit; } if (OSM_LOG_IS_ACTIVE_V2(sa->p_log, OSM_LOG_DEBUG)) { OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Requester port GUID 0x%" PRIx64 "\n", cl_ntoh64(osm_physp_get_port_guid(p_req_physp))); osm_dump_sm_info_record_v2(sa->p_log, p_rcvd_rec, FILE_ID, OSM_LOG_DEBUG); } p_smi = &p_rcvd_rec->sm_info; cl_qlist_init(&rec_list); context.p_rcvd_rec = p_rcvd_rec; context.p_list = &rec_list; context.comp_mask = sad_mad->comp_mask; context.sa = sa; context.p_req_physp = p_req_physp; cl_plock_acquire(sa->p_lock); /* If the user specified a LID, it obviously narrows our work load, since we don't have to search every port */ if (comp_mask & IB_SMIR_COMPMASK_LID) { p_port = osm_get_port_by_lid(sa->p_subn, p_rcvd_rec->lid); if (!p_port) { status = IB_NOT_FOUND; OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 2806: " "No port found with LID %u\n", cl_ntoh16(p_rcvd_rec->lid)); } } if (status == IB_SUCCESS) { /* Handle our own SM first */ local_port = osm_get_port_by_guid(sa->p_subn, sa->p_subn->sm_port_guid); if (!local_port) { cl_plock_release(sa->p_lock); OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 2809: " "No port found with GUID 0x%016" PRIx64 "\n", cl_ntoh64(sa->p_subn->sm_port_guid)); goto Exit; } if (!p_port || local_port == p_port) { if (FALSE == osm_physp_share_pkey(sa->p_log, p_req_physp, local_port->p_physp, sa->p_subn->opt.allow_both_pkeys)) { cl_plock_release(sa->p_lock); OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 2805: " "Cannot get SMInfo record due to pkey violation\n"); goto Exit; } /* Check that other search components specified match */ if ((comp_mask & IB_SMIR_COMPMASK_GUID) && sa->p_subn->sm_port_guid != p_smi->guid) goto Remotes; if ((comp_mask & IB_SMIR_COMPMASK_PRIORITY) && sa->p_subn->opt.sm_priority != ib_sminfo_get_priority(p_smi)) goto Remotes; if ((comp_mask & IB_SMIR_COMPMASK_SMSTATE) && sa->p_subn->sm_state != ib_sminfo_get_state(p_smi)) goto Remotes; /* Now, add local SMInfo to list */ pri_state = sa->p_subn->sm_state & 0x0F; pri_state |= (sa->p_subn->opt.sm_priority & 0x0F) << 4; smir_rcv_new_smir(sa, local_port, context.p_list, sa->p_subn->sm_port_guid, cl_ntoh32(sa->p_subn->p_osm->stats. 
qp0_mads_sent), pri_state, p_req_physp); } Remotes: if (p_port && p_port != local_port) { /* Find remote SM corresponding to p_port */ port_guid = osm_port_get_guid(p_port); p_sm_guid_tbl = &sa->p_subn->sm_guid_tbl; p_rem_sm = (osm_remote_sm_t *) cl_qmap_get(p_sm_guid_tbl, port_guid); if (p_rem_sm != (osm_remote_sm_t *) cl_qmap_end(p_sm_guid_tbl)) sa_smir_by_comp_mask(sa, p_rem_sm, &context); else OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 280A: " "No remote SM for GUID 0x%016" PRIx64 "\n", cl_ntoh64(port_guid)); } else { /* Go over all other known (remote) SMs */ cl_qmap_apply_func(&sa->p_subn->sm_guid_tbl, sa_smir_by_comp_mask_cb, &context); } } cl_plock_release(sa->p_lock); osm_sa_respond(sa, p_madw, sizeof(ib_sminfo_record_t), &rec_list); Exit: OSM_LOG_EXIT(sa->p_log); }
static int do_lid_matrix_file_load(void *context) { char line[1024]; uint8_t hops[256]; char *file_name; FILE *file; ib_net64_t guid; osm_opensm_t *p_osm = context; osm_switch_t *p_sw; unsigned lineno; uint16_t lid; file_name = p_osm->subn.opt.lid_matrix_dump_file; if (!file_name) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "lid matrix file name is not given; " "using default lid matrix generation algorithm\n"); return 1; } file = fopen(file_name, "r"); if (!file) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR | OSM_LOG_SYS, "ERR 6305: " "cannot open lid matrix file \'%s\': %m\n", file_name); return -1; } lineno = 0; p_sw = NULL; while (fgets(line, sizeof(line) - 1, file) != NULL) { char *p, *q; lineno++; p = line; while (isspace(*p)) p++; if (*p == '#') continue; if (!strncmp(p, "Switch", 6)) { q = strstr(p, " guid 0x"); if (!q) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse switch definition\n", file_name, lineno); return -1; } p = q + 8; guid = strtoull(p, &q, 16); if (q == p || !isspace(*q)) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse switch guid: \'%s\'\n", file_name, lineno, p); return -1; } guid = cl_hton64(guid); p_sw = osm_get_switch_by_guid(&p_osm->subn, guid); if (!p_sw) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "cannot find switch %016" PRIx64 "\n", cl_ntoh64(guid)); continue; } } else if (p_sw && !strncmp(p, "0x", 2)) { unsigned long num; unsigned len = 0; memset(hops, 0xff, sizeof(hops)); p += 2; num = strtoul(p, &q, 16); if (num > 0xffff || q == p || (*q != ':' && !isspace(*q))) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse lid: \'%s\'\n", file_name, lineno, p); return -1; } /* Just checked the range, so casting is safe */ lid = (uint16_t) num; p = q; while (isspace(*p) || *p == ':') p++; while (len < 256 && *p && *p != '#') { num = strtoul(p, &q, 16); if (num > 0xff || q == p) { OSM_LOG(&p_osm->log, OSM_LOG_ERROR, "PARSE ERROR: %s:%u: " "cannot parse hops number: \'%s\'\n", file_name, lineno, p); return -1; } /* Just checked the range, so casting is safe */ hops[len++] = (uint8_t) num; p = q; while (isspace(*p)) p++; } /* additionally try to extract guid */ q = strstr(p, " portguid 0x"); if (!q) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "PARSE WARNING: %s:%u: " "cannot find port guid " "(maybe broken dump): \'%s\'\n", file_name, lineno, p); guid = 0; } else { p = q + 12; guid = strtoull(p, &q, 16); if (q == p || !isspace(*q)) { OSM_LOG(&p_osm->log, OSM_LOG_VERBOSE, "PARSE WARNING: %s:%u: " "cannot parse port guid " "(maybe broken dump): \'%s\'\n", file_name, lineno, p); guid = 0; } } guid = cl_hton64(guid); add_lid_hops(p_osm, p_sw, lid, guid, hops, len); } } fclose(file); return 0; }
ib_api_status_t osm_vendor_get_guid_ca_and_port(IN osm_vendor_t * const p_vend, IN ib_net64_t const guid, OUT VAPI_hca_hndl_t * p_hca_hndl, OUT VAPI_hca_id_t * p_hca_id, OUT uint8_t * p_hca_idx, OUT uint32_t * p_port_num) { ib_api_status_t status; VAPI_hca_id_t *p_ca_ids = NULL; VAPI_ret_t vapi_res; VAPI_hca_hndl_t hca_hndl; VAPI_hca_vendor_t hca_vendor; VAPI_hca_cap_t hca_cap; IB_gid_t *p_port_gid = NULL; uint16_t maxNumGids; ib_net64_t port_guid; uint32_t ca, portIdx, ca_count; OSM_LOG_ENTER(p_vend->p_log); CL_ASSERT(p_vend); /* * 1) Determine the number of CA's * 2) Allocate an array big enough to hold the ca info objects. * 3) Call again to retrieve the guids. */ status = __osm_vendor_get_ca_ids(p_vend, &p_ca_ids, &ca_count); if (status != IB_SUCCESS) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 3D16: " "Fail to get CA Ids.\n"); goto Exit; } /* * For each CA, retrieve the CA info attributes */ for (ca = 0; ca < ca_count; ca++) { /* get the HCA handle */ vapi_res = EVAPI_get_hca_hndl(p_ca_ids[ca], &hca_hndl); if (vapi_res != VAPI_OK) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 3D17: " "Fail to get HCA handle (%u).\n", vapi_res); goto Exit; } /* get the CA attributes - to know how many ports it has: */ if (osm_log_is_active(p_vend->p_log, OSM_LOG_DEBUG)) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "osm_vendor_get_guid_ca_and_port: " "Querying CA %s.\n", p_ca_ids[ca]); } /* query and get the HCA capability */ vapi_res = VAPI_query_hca_cap(hca_hndl, &hca_vendor, &hca_cap); if (vapi_res != VAPI_OK) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 3D18: " "Fail to get HCA Capabilities (%u).\n", vapi_res); goto Exit; } /* go over all ports - to obtain their guids */ for (portIdx = 0; portIdx < hca_cap.phys_port_num; portIdx++) { vapi_res = VAPI_query_hca_gid_tbl(hca_hndl, portIdx + 1, 0, &maxNumGids, NULL); p_port_gid = (IB_gid_t *) malloc(maxNumGids * sizeof(IB_gid_t)); /* get the port guid */ vapi_res = VAPI_query_hca_gid_tbl(hca_hndl, portIdx + 1, maxNumGids, &maxNumGids, p_port_gid); if (vapi_res != VAPI_OK) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 3D19: " "Fail to get HCA Port GID (%d).\n", vapi_res); goto Exit; } /* convert to SF style */ __osm_vendor_gid_to_guid(p_port_gid[0], (VAPI_gid_t *) & port_guid); /* finally did we find it ? */ if (port_guid == guid) { *p_hca_hndl = hca_hndl; memcpy(p_hca_id, p_ca_ids[ca], sizeof(VAPI_hca_id_t)); *p_hca_idx = ca; *p_port_num = portIdx + 1; status = IB_SUCCESS; goto Exit; } free(p_port_gid); p_port_gid = NULL; } /* ALL PORTS */ } /* all HCAs */ osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 3D20: " "Fail to find HCA and Port for Port Guid 0x%" PRIx64 "\n", cl_ntoh64(guid)); status = IB_INVALID_GUID; Exit: if (p_ca_ids != NULL) free(p_ca_ids); if (p_port_gid != NULL) free(p_port_gid); OSM_LOG_EXIT(p_vend->p_log); return (status); }
static boolean_t __osm_link_mgr_set_physp_pi(osm_sm_t * sm, IN osm_physp_t * const p_physp, IN uint8_t const port_state) { uint8_t payload[IB_SMP_DATA_SIZE]; ib_port_info_t *const p_pi = (ib_port_info_t *) payload; const ib_port_info_t *p_old_pi; osm_madw_context_t context; osm_node_t *p_node; ib_api_status_t status; uint8_t port_num; uint8_t mtu; uint8_t op_vls; boolean_t esp0 = FALSE; boolean_t send_set = FALSE; osm_physp_t *p_remote_physp; OSM_LOG_ENTER(sm->p_log); p_node = osm_physp_get_node_ptr(p_physp); port_num = osm_physp_get_port_num(p_physp); if (port_num == 0) { /* CAs don't have a port 0, and for switch port 0, we need to check if this is enhanced or base port 0. For base port 0 the following parameters are not valid (p822, table 145). */ if (!p_node->sw) { OSM_LOG(sm->p_log, OSM_LOG_ERROR, "ERR 4201: " "Cannot find switch by guid: 0x%" PRIx64 "\n", cl_ntoh64(p_node->node_info.node_guid)); goto Exit; } if (ib_switch_info_is_enhanced_port0(&p_node->sw->switch_info) == FALSE) { /* This means the switch doesn't support enhanced port 0. Can skip it. */ OSM_LOG(sm->p_log, OSM_LOG_DEBUG, "Skipping port 0, GUID 0x%016" PRIx64 "\n", cl_ntoh64(osm_physp_get_port_guid(p_physp))); goto Exit; } esp0 = TRUE; } /* PAST THIS POINT WE ARE HANDLING EITHER A NON PORT 0 OR ENHANCED PORT 0 */ p_old_pi = &p_physp->port_info; memset(payload, 0, IB_SMP_DATA_SIZE); memcpy(payload, p_old_pi, sizeof(ib_port_info_t)); /* Should never write back a value that is bigger than 3 in the PortPhysicalState field - so we cannot simply copy! Actually we want to write there: port physical state - no change, link down default state = polling port state - as requested. */ p_pi->state_info2 = 0x02; ib_port_info_set_port_state(p_pi, port_state); if (ib_port_info_get_link_down_def_state(p_pi) != ib_port_info_get_link_down_def_state(p_old_pi)) send_set = TRUE; /* didn't get PortInfo before */ if (!ib_port_info_get_port_state(p_old_pi)) send_set = TRUE; /* we only change port fields if we do not change state */ if (port_state == IB_LINK_NO_CHANGE) { /* The following fields are relevant only for CA port, router, or Enh. SP0 */ if (osm_node_get_type(p_node) != IB_NODE_TYPE_SWITCH || port_num == 0) { p_pi->m_key = sm->p_subn->opt.m_key; if (memcmp(&p_pi->m_key, &p_old_pi->m_key, sizeof(p_pi->m_key))) send_set = TRUE; p_pi->subnet_prefix = sm->p_subn->opt.subnet_prefix; if (memcmp(&p_pi->subnet_prefix, &p_old_pi->subnet_prefix, sizeof(p_pi->subnet_prefix))) send_set = TRUE; p_pi->base_lid = osm_physp_get_base_lid(p_physp); if (memcmp(&p_pi->base_lid, &p_old_pi->base_lid, sizeof(p_pi->base_lid))) send_set = TRUE; /* we are initializing the ports with our local sm_base_lid */ p_pi->master_sm_base_lid = sm->p_subn->sm_base_lid; if (memcmp(&p_pi->master_sm_base_lid, &p_old_pi->master_sm_base_lid, sizeof(p_pi->master_sm_base_lid))) send_set = TRUE; p_pi->m_key_lease_period = sm->p_subn->opt.m_key_lease_period; if (memcmp(&p_pi->m_key_lease_period, &p_old_pi->m_key_lease_period, sizeof(p_pi->m_key_lease_period))) send_set = TRUE; if (esp0 == FALSE) p_pi->mkey_lmc = sm->p_subn->opt.lmc; else { if (sm->p_subn->opt.lmc_esp0) p_pi->mkey_lmc = sm->p_subn->opt.lmc; else p_pi->mkey_lmc = 0; } if (memcmp(&p_pi->mkey_lmc, &p_old_pi->mkey_lmc, sizeof(p_pi->mkey_lmc))) send_set = TRUE; ib_port_info_set_timeout(p_pi, sm->p_subn->opt.
subnet_timeout); if (ib_port_info_get_timeout(p_pi) != ib_port_info_get_timeout(p_old_pi)) send_set = TRUE; } /* Several timeout mechanisms: */ p_remote_physp = osm_physp_get_remote(p_physp); if (port_num != 0 && p_remote_physp) { if (osm_node_get_type(osm_physp_get_node_ptr(p_physp)) == IB_NODE_TYPE_ROUTER) { ib_port_info_set_hoq_lifetime(p_pi, sm->p_subn-> opt. leaf_head_of_queue_lifetime); } else if (osm_node_get_type (osm_physp_get_node_ptr(p_physp)) == IB_NODE_TYPE_SWITCH) { /* Is remote end CA or router (a leaf port) ? */ if (osm_node_get_type (osm_physp_get_node_ptr(p_remote_physp)) != IB_NODE_TYPE_SWITCH) { ib_port_info_set_hoq_lifetime(p_pi, sm-> p_subn-> opt. leaf_head_of_queue_lifetime); ib_port_info_set_vl_stall_count(p_pi, sm-> p_subn-> opt. leaf_vl_stall_count); } else { ib_port_info_set_hoq_lifetime(p_pi, sm-> p_subn-> opt. head_of_queue_lifetime); ib_port_info_set_vl_stall_count(p_pi, sm-> p_subn-> opt. vl_stall_count); } } if (ib_port_info_get_hoq_lifetime(p_pi) != ib_port_info_get_hoq_lifetime(p_old_pi) || ib_port_info_get_vl_stall_count(p_pi) != ib_port_info_get_vl_stall_count(p_old_pi)) send_set = TRUE; } ib_port_info_set_phy_and_overrun_err_thd(p_pi, sm->p_subn->opt. local_phy_errors_threshold, sm->p_subn->opt. overrun_errors_threshold); if (memcmp(&p_pi->error_threshold, &p_old_pi->error_threshold, sizeof(p_pi->error_threshold))) send_set = TRUE; /* Set the easy common parameters for all port types, then determine the neighbor MTU. */ p_pi->link_width_enabled = p_old_pi->link_width_supported; if (memcmp(&p_pi->link_width_enabled, &p_old_pi->link_width_enabled, sizeof(p_pi->link_width_enabled))) send_set = TRUE; if (sm->p_subn->opt.force_link_speed && (sm->p_subn->opt.force_link_speed != 15 || ib_port_info_get_link_speed_enabled(p_pi) != ib_port_info_get_link_speed_sup(p_pi))) { ib_port_info_set_link_speed_enabled(p_pi, sm->p_subn->opt. force_link_speed); if (memcmp(&p_pi->link_speed, &p_old_pi->link_speed, sizeof(p_pi->link_speed))) send_set = TRUE; } /* calc new op_vls and mtu */ op_vls = osm_physp_calc_link_op_vls(sm->p_log, sm->p_subn, p_physp); mtu = osm_physp_calc_link_mtu(sm->p_log, p_physp); ib_port_info_set_neighbor_mtu(p_pi, mtu); if (ib_port_info_get_neighbor_mtu(p_pi) != ib_port_info_get_neighbor_mtu(p_old_pi)) send_set = TRUE; ib_port_info_set_op_vls(p_pi, op_vls); if (ib_port_info_get_op_vls(p_pi) != ib_port_info_get_op_vls(p_old_pi)) send_set = TRUE; /* provide the vl_high_limit from the qos mgr */ if (sm->p_subn->opt.qos && p_physp->vl_high_limit != p_old_pi->vl_high_limit) { send_set = TRUE; p_pi->vl_high_limit = p_physp->vl_high_limit; } } if (port_state != IB_LINK_NO_CHANGE && port_state != ib_port_info_get_port_state(p_old_pi)) { send_set = TRUE; if (port_state == IB_LINK_ACTIVE) context.pi_context.active_transition = TRUE; else context.pi_context.active_transition = FALSE; } context.pi_context.node_guid = osm_node_get_node_guid(p_node); context.pi_context.port_guid = osm_physp_get_port_guid(p_physp); context.pi_context.set_method = TRUE; context.pi_context.light_sweep = FALSE; /* We need to send the PortInfoSet request with the new sm_lid in the following cases: 1. There is a change in the values (send_set == TRUE) 2. This is a switch external port (so it wasn't handled yet by osm_lid_mgr) and first_time_master_sweep flag on the subnet is TRUE, which means the SM just became master, and it then needs to send at PortInfoSet to every port. 
*/ if (osm_node_get_type(p_node) == IB_NODE_TYPE_SWITCH && port_num && sm->p_subn->first_time_master_sweep == TRUE) send_set = TRUE; if (send_set) status = osm_req_set(sm, osm_physp_get_dr_path_ptr(p_physp), payload, sizeof(payload), IB_MAD_ATTR_PORT_INFO, cl_hton32(port_num), CL_DISP_MSGID_NONE, &context); Exit: OSM_LOG_EXIT(sm->p_log); return send_set; }
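/*
 * Illustrative restatement of the HOQ-lifetime selection in
 * __osm_link_mgr_set_physp_pi() above: a switch port whose remote end is not
 * another switch is a leaf port and gets the leaf-specific head-of-queue lifetime
 * and VL stall count, while switch-to-switch links get the fabric-wide values.
 * The types and option names below are illustrative only.
 */
enum example_node_type { EXAMPLE_NODE_CA, EXAMPLE_NODE_SWITCH, EXAMPLE_NODE_ROUTER };

struct example_hoq_opts {
	unsigned leaf_hoq_life, leaf_vl_stall;	/* for leaf (switch-to-CA/router) ports */
	unsigned hoq_life, vl_stall;		/* for switch-to-switch ports */
};

static void pick_hoq_values(enum example_node_type remote_type,
			    const struct example_hoq_opts *opt,
			    unsigned *hoq_life, unsigned *vl_stall)
{
	if (remote_type != EXAMPLE_NODE_SWITCH) {
		*hoq_life = opt->leaf_hoq_life;
		*vl_stall = opt->leaf_vl_stall;
	} else {
		*hoq_life = opt->hoq_life;
		*vl_stall = opt->vl_stall;
	}
}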
osm_bind_handle_t osm_vendor_bind(IN osm_vendor_t * const p_vend, IN osm_bind_info_t * const p_bind_info, IN osm_mad_pool_t * const p_mad_pool, IN osm_vend_mad_recv_callback_t mad_recv_callback, IN osm_vend_mad_send_err_callback_t send_err_callback, IN void *context) { osmv_bind_obj_t *p_bo; ib_api_status_t status; char hca_id[32]; cl_status_t cl_st; cl_list_obj_t *p_obj; uint8_t hca_index; if (NULL == p_vend || NULL == p_bind_info || NULL == p_mad_pool || NULL == mad_recv_callback || NULL == send_err_callback) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7302: " "NULL parameter passed in: p_vend=%p p_bind_info=%p p_mad_pool=%p recv_cb=%p send_err_cb=%p\n", p_vend, p_bind_info, p_mad_pool, mad_recv_callback, send_err_callback); return OSM_BIND_INVALID_HANDLE; } p_bo = malloc(sizeof(osmv_bind_obj_t)); if (NULL == p_bo) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7303: could not allocate the bind object\n"); return OSM_BIND_INVALID_HANDLE; } memset(p_bo, 0, sizeof(osmv_bind_obj_t)); p_bo->p_vendor = p_vend; p_bo->recv_cb = mad_recv_callback; p_bo->send_err_cb = send_err_callback; p_bo->cb_context = context; p_bo->p_osm_pool = p_mad_pool; /* obtain the hca name and port num from the guid */ osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "osm_vendor_bind: " "Finding CA and Port that owns port guid 0x%" PRIx64 ".\n", cl_ntoh64(p_bind_info->port_guid)); status = osm_vendor_get_guid_ca_and_port(p_bo->p_vendor, p_bind_info->port_guid, &(p_bo->hca_hndl), hca_id, &hca_index, &(p_bo->port_num)); if (status != IB_SUCCESS) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7304: " "Fail to find port number of port guid:0x%016" PRIx64 "\n", p_bind_info->port_guid); free(p_bo); return OSM_BIND_INVALID_HANDLE; } /* Initialize the magic_ptr to the pointer of the p_bo info. This will be used to signal when the object is being destroyed, so no real action will be done then. */ p_bo->magic_ptr = p_bo; p_bo->is_closing = FALSE; cl_spinlock_construct(&(p_bo->lock)); cl_st = cl_spinlock_init(&(p_bo->lock)); if (cl_st != CL_SUCCESS) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7305: " "could not initialize the spinlock ...\n"); free(p_bo); return OSM_BIND_INVALID_HANDLE; } osm_log(p_bo->p_vendor->p_log, OSM_LOG_DEBUG, "osm_vendor_bind: osmv_txnmgr_init ... \n"); if (osmv_txnmgr_init(&p_bo->txn_mgr, p_vend->p_log, &(p_bo->lock)) != IB_SUCCESS) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7306: " "osmv_txnmgr_init failed \n"); cl_spinlock_destroy(&p_bo->lock); free(p_bo); return OSM_BIND_INVALID_HANDLE; } /* Do the real job! (Transport-dependent) */ if (IB_SUCCESS != osmv_transport_init(p_bind_info, hca_id, hca_index, p_bo)) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7307: " "osmv_transport_init failed \n"); osmv_txnmgr_done((osm_bind_handle_t) p_bo); cl_spinlock_destroy(&p_bo->lock); free(p_bo); return OSM_BIND_INVALID_HANDLE; } /* insert bind handle into db */ p_obj = malloc(sizeof(cl_list_obj_t)); if (NULL == p_obj) { osm_log(p_bo->p_vendor->p_log, OSM_LOG_ERROR, "osm_vendor_bind: ERR 7308: " "osm_vendor_bind: could not allocate the list object\n"); osmv_transport_done(p_bo->p_transp_mgr); osmv_txnmgr_done((osm_bind_handle_t) p_bo); cl_spinlock_destroy(&p_bo->lock); free(p_bo); return OSM_BIND_INVALID_HANDLE; } memset(p_obj, 0, sizeof(cl_list_obj_t)); cl_qlist_set_obj(p_obj, p_bo); cl_qlist_insert_head(&p_vend->bind_handles, &p_obj->list_item); return (osm_bind_handle_t) p_bo; }
/* * Prepare an asynchronous QP (rcv) for sending inform info and * handling the incoming reports. * */ ib_api_status_t osmt_bind_inform_qp(IN osmtest_t * const p_osmt, OUT osmt_qp_ctx_t * p_qp_ctx) { ib_net64_t port_guid; VAPI_hca_hndl_t hca_hndl; VAPI_hca_id_t hca_id; uint32_t port_num; VAPI_ret_t vapi_ret; IB_MGT_ret_t mgt_ret; uint8_t hca_index; osm_log_t *p_log = &p_osmt->log; ib_api_status_t status = IB_SUCCESS; OSM_LOG_ENTER(p_log); port_guid = p_osmt->local_port.port_guid; OSM_LOG(p_log, OSM_LOG_DEBUG, "Binding to port 0x%" PRIx64 "\n", cl_ntoh64(port_guid)); /* obtain the hca name and port num from the guid */ OSM_LOG(p_log, OSM_LOG_DEBUG, "Finding CA and Port that owns port guid 0x%" PRIx64 "\n", port_guid); mgt_ret = osm_vendor_get_guid_ca_and_port(p_osmt->p_vendor, port_guid, &hca_hndl, &hca_id[0], &hca_index, &port_num); if (mgt_ret != IB_MGT_OK) { OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 0109: " "Unable to obtain CA and port (%d).\n", mgt_ret); status = IB_ERROR; goto Exit; } #define OSMT_MTL_REVERSE_QP1_WELL_KNOWN_Q_KEY 0x80010000 strncpy(p_qp_ctx->qp_bind_hndl.hca_id, hca_id, sizeof(hca_id)); p_qp_ctx->qp_bind_hndl.hca_hndl = hca_hndl; p_qp_ctx->qp_bind_hndl.port_num = port_num; p_qp_ctx->qp_bind_hndl.max_outs_sq = 10; p_qp_ctx->qp_bind_hndl.max_outs_rq = 10; p_qp_ctx->qp_bind_hndl.qkey = OSMT_MTL_REVERSE_QP1_WELL_KNOWN_Q_KEY; vapi_ret = osmt_mtl_init_opened_hca(&p_qp_ctx->qp_bind_hndl); if (vapi_ret != VAPI_OK) { OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 0114: " "Error initializing QP.\n"); status = IB_ERROR; goto Exit; } /* we use the pre-allocated buffers for send and receive : send from buf[0] receive from buf[2] */ p_qp_ctx->p_send_buf = (uint8_t *) p_qp_ctx->qp_bind_hndl.buf_ptr + GRH_LEN; p_qp_ctx->p_recv_buf = (uint8_t *) p_qp_ctx->qp_bind_hndl.buf_ptr + 2 * (GRH_LEN + MAD_BLOCK_SIZE); /* Need to clear assigned memory of p_send_buf - before using it to send any data */ memset(p_qp_ctx->p_send_buf, 0, MAD_BLOCK_SIZE); status = IB_SUCCESS; OSM_LOG(p_log, OSM_LOG_DEBUG, "Initialized QP:0x%X in VAPI Mode\n", p_qp_ctx->qp_bind_hndl.qp_id); OSM_LOG(p_log, OSM_LOG_DEBUG, "Binding to IB_MGT SMI\n"); /* we also need a QP0 handle for sending packets */ mgt_ret = IB_MGT_get_handle(hca_id, port_num, IB_MGT_SMI, &(p_qp_ctx->ib_mgt_qp0_handle)); if (IB_MGT_OK != mgt_ret) { OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 0115: " "Error obtaining IB_MGT handle to SMI\n"); status = IB_ERROR; goto Exit; } Exit: OSM_LOG_EXIT(p_log); return status; }
static void ucast_mgr_process_port(IN osm_ucast_mgr_t * p_mgr, IN osm_switch_t * p_sw, IN osm_port_t * p_port, IN unsigned lid_offset) { uint16_t min_lid_ho; uint16_t max_lid_ho; uint16_t lid_ho; uint8_t port; boolean_t is_ignored_by_port_prof; ib_net64_t node_guid; unsigned start_from = 1; OSM_LOG_ENTER(p_mgr->p_log); osm_port_get_lid_range_ho(p_port, &min_lid_ho, &max_lid_ho); /* If the lids are zero - then there was some problem with * the initialization. Don't handle this port. */ if (min_lid_ho == 0 || max_lid_ho == 0) { OSM_LOG(p_mgr->p_log, OSM_LOG_ERROR, "ERR 3A04: " "Port 0x%" PRIx64 " (%s port %d) has LID 0. An " "initialization error occurred. Ignoring port\n", cl_ntoh64(osm_port_get_guid(p_port)), p_port->p_node->print_desc, p_port->p_physp->port_num); goto Exit; } lid_ho = min_lid_ho + lid_offset; if (lid_ho > max_lid_ho) goto Exit; if (lid_offset && !p_mgr->is_dor) /* ignore potential overflow - it is handled in osm_switch.c */ start_from = osm_switch_get_port_by_lid(p_sw, lid_ho - 1, OSM_NEW_LFT) + 1; OSM_LOG(p_mgr->p_log, OSM_LOG_DEBUG, "Processing port 0x%" PRIx64 " (\'%s\' port %u), LID %u [%u,%u]\n", cl_ntoh64(osm_port_get_guid(p_port)), p_port->p_node->print_desc, p_port->p_physp->port_num, lid_ho, min_lid_ho, max_lid_ho); /* TODO - This should be a runtime error, not a CL_ASSERT() */ CL_ASSERT(max_lid_ho <= IB_LID_UCAST_END_HO); node_guid = osm_node_get_node_guid(p_sw->p_node); /* The lid matrix contains the number of hops to each lid from each port. From this information we determine how best to distribute the LID range across the ports that can reach those LIDs. */ port = osm_switch_recommend_path(p_sw, p_port, lid_ho, start_from, p_mgr->p_subn->ignore_existing_lfts, p_mgr->p_subn->opt.lmc, p_mgr->is_dor, p_mgr->p_subn->opt.port_shifting, !lid_offset && p_port->use_scatter, OSM_LFT); if (port == OSM_NO_PATH) { /* do not try to overwrite the port profile of a nonexistent port ... */ is_ignored_by_port_prof = TRUE; OSM_LOG(p_mgr->p_log, OSM_LOG_DEBUG, "No path to get to LID %u from switch 0x%" PRIx64 "\n", lid_ho, cl_ntoh64(node_guid)); } else { osm_physp_t *p = osm_node_get_physp_ptr(p_sw->p_node, port); if (!p) goto Exit; OSM_LOG(p_mgr->p_log, OSM_LOG_DEBUG, "Routing LID %u to port %u for switch 0x%" PRIx64 "\n", lid_ho, port, cl_ntoh64(node_guid)); /* we would like to optionally ignore this port in equalization as in the case of the Mellanox Anafa Internal PCI TCA port */ is_ignored_by_port_prof = p->is_prof_ignored; /* We would also ignore this route if the target lid is of a switch and port_profile_switch_nodes is not TRUE */ if (!p_mgr->p_subn->opt.port_profile_switch_nodes) is_ignored_by_port_prof |= (osm_node_get_type(p_port->p_node) == IB_NODE_TYPE_SWITCH); } /* We have selected the port for this LID. Write it to the forwarding tables. */ p_sw->new_lft[lid_ho] = port; if (!is_ignored_by_port_prof) { struct osm_remote_node *rem_node_used; osm_switch_count_path(p_sw, port); if (port > 0 && p_port->priv && (rem_node_used = find_and_add_remote_sys(p_sw, port, p_mgr->is_dor, p_port->priv))) rem_node_used->forwarded_to++; } Exit: OSM_LOG_EXIT(p_mgr->p_log); }
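/*
 * Illustrative sketch (example values only, not OpenSM data): with LMC
 * configured, a port owns 2^lmc consecutive LIDs starting at its base LID,
 * and the routine above is invoked once per lid_offset so that each of those
 * LIDs can get its own egress port in the switch forwarding table.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t min_lid_ho = 12;	/* example base LID */
	uint8_t lmc = 2;		/* example LMC: 4 LIDs per port */
	uint16_t max_lid_ho = min_lid_ho + (1 << lmc) - 1;
	unsigned lid_offset;

	for (lid_offset = 0; min_lid_ho + lid_offset <= max_lid_ho; lid_offset++)
		printf("pick an egress port for LID %u\n",
		       min_lid_ho + lid_offset);
	return 0;
}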
void osm_sa_respond(osm_sa_t *sa, osm_madw_t *madw, size_t attr_size, cl_qlist_t *list) { struct item_data { cl_list_item_t list; char data[0]; }; cl_list_item_t *item; osm_madw_t *resp_madw; ib_sa_mad_t *sa_mad, *resp_sa_mad; unsigned num_rec, i; #ifndef VENDOR_RMPP_SUPPORT unsigned trim_num_rec; #endif unsigned char *p; sa_mad = osm_madw_get_sa_mad_ptr(madw); num_rec = cl_qlist_count(list); /* * C15-0.1.30: * If we do a SubnAdmGet and got more than one record it is an error! */ if (sa_mad->method == IB_MAD_METHOD_GET && num_rec > 1) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4C05: " "Got %u records for SubnAdmGet(%s) comp_mask 0x%016" PRIx64 "\n", num_rec, ib_get_sa_attr_str(sa_mad->attr_id), cl_ntoh64(sa_mad->comp_mask)); osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_TOO_MANY_RECORDS); goto Exit; } #ifndef VENDOR_RMPP_SUPPORT trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / attr_size; if (trim_num_rec < num_rec) { OSM_LOG(sa->p_log, OSM_LOG_VERBOSE, "Number of records:%u trimmed to:%u to fit in one MAD\n", num_rec, trim_num_rec); num_rec = trim_num_rec; } #endif OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Returning %u records\n", num_rec); if (sa_mad->method == IB_MAD_METHOD_GET && num_rec == 0) { osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_NO_RECORDS); goto Exit; } /* * Get a MAD to reply. Address of Mad is in the received mad_wrapper */ resp_madw = osm_mad_pool_get(sa->p_mad_pool, madw->h_bind, num_rec * attr_size + IB_SA_MAD_HDR_SIZE, &madw->mad_addr); if (!resp_madw) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4C06: " "osm_mad_pool_get failed\n"); osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_NO_RESOURCES); goto Exit; } resp_sa_mad = osm_madw_get_sa_mad_ptr(resp_madw); /* Copy the MAD header back into the response mad. Set the 'R' bit and the payload length, Then copy all records from the list into the response payload. */ memcpy(resp_sa_mad, sa_mad, IB_SA_MAD_HDR_SIZE); if (resp_sa_mad->method == IB_MAD_METHOD_SET) resp_sa_mad->method = IB_MAD_METHOD_GET; resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ resp_sa_mad->sm_key = 0; /* Fill in the offset (paylen will be done by the rmpp SAR) */ resp_sa_mad->attr_offset = num_rec ? ib_get_attr_offset(attr_size) : 0; p = ib_sa_mad_get_payload_ptr(resp_sa_mad); #ifndef VENDOR_RMPP_SUPPORT /* we support only one packet RMPP - so we will set the first and last flags for gettable */ if (resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) { resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; } #else /* forcefully define the packet as RMPP one */ if (resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; #endif for (i = 0; i < num_rec; i++) { item = cl_qlist_remove_head(list); memcpy(p, ((struct item_data *)item)->data, attr_size); p += attr_size; free(item); } osm_dump_sa_mad(sa->p_log, resp_sa_mad, OSM_LOG_FRAMES); osm_sa_send(sa, resp_madw, FALSE); Exit: /* need to set the mem free ... */ item = cl_qlist_remove_head(list); while (item != cl_qlist_end(list)) { free(item); item = cl_qlist_remove_head(list); } }
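/*
 * Standalone sketch of the record-trimming arithmetic used above when RMPP
 * is not available.  Assumed constants for the example: a 256-byte MAD and a
 * 56-byte SA MAD header (leaving 200 payload bytes); the 24-byte record size
 * and the 20 gathered records are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned mad_block_size = 256;
	unsigned sa_mad_hdr_size = 56;
	unsigned attr_size = 24;	/* hypothetical record size */
	unsigned num_rec = 20;		/* records collected for the query */
	unsigned trim_num_rec = (mad_block_size - sa_mad_hdr_size) / attr_size;

	if (trim_num_rec < num_rec)
		num_rec = trim_num_rec;	/* 200 / 24 = 8 records fit in one MAD */
	printf("returning %u records\n", num_rec);
	return 0;
}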
static void __osm_al_send_callback(IN void *mad_svc_context, IN ib_mad_element_t * p_elem) { osm_al_bind_info_t *const p_bind = (osm_al_bind_info_t *) mad_svc_context; osm_vendor_t *const p_vend = p_bind->p_vend; osm_madw_t *const p_madw = (osm_madw_t *) p_elem->context1; osm_vend_wrap_t *const p_vw = osm_madw_get_vend_ptr(p_madw); ib_mad_t *p_mad; OSM_LOG_ENTER(p_vend->p_log); CL_ASSERT(p_vw); CL_ASSERT(p_vw->h_av); /* Destroy the address vector as necessary. */ if (p_vw->h_av != p_bind->h_dr_av) { if (osm_log_is_active(p_vend->p_log, OSM_LOG_DEBUG)) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "__osm_al_send_callback: " "Destroying av handle %p.\n", p_vw->h_av); } ib_destroy_av(p_vw->h_av); } p_mad = ib_get_mad_buf(p_elem); if (p_elem->resp_expected) { /* If the send was unsuccessful, notify the user for MADs that were expecting a response. A NULL mad wrapper parameter is the user's clue that the transaction turned sour. Otherwise, do nothing for successful sends when a response is expected. The mad will be returned to the pool later. */ p_madw->status = __osm_al_convert_wcs(p_elem->status); if (p_elem->status != IB_WCS_SUCCESS) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "__osm_al_send_callback: " "MAD completed with work queue error: %s.\n", ib_get_wc_status_str(p_elem->status)); /* Return any wrappers to the pool that may have been pre-emptively allocated to handle a receive. */ if (p_vw->p_resp_madw) { osm_mad_pool_put(p_bind->p_osm_pool, p_vw->p_resp_madw); p_vw->p_resp_madw = NULL; } p_bind->send_err_callback(p_bind->client_context, p_madw); } } else { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "__osm_al_send_callback: " "Returning MAD to pool, TID = 0x%" PRIx64 ".\n", cl_ntoh64(p_mad->trans_id)); osm_mad_pool_put(p_bind->p_osm_pool, p_madw); goto Exit; } Exit: OSM_LOG_EXIT(p_vend->p_log); }
ib_api_status_t osm_vendor_get_guid_ca_and_port(IN osm_vendor_t * const p_vend, IN ib_net64_t const guid, OUT uint32_t * p_hca_hndl, OUT char *p_hca_id, OUT uint8_t * p_hca_idx, OUT uint32_t * p_port_num) { uint32_t caIdx; uint32_t ca_count = 0; uint8_t port_num; ib_api_status_t status = IB_ERROR; OSM_LOG_ENTER(p_vend->p_log); CL_ASSERT(p_vend); /* determine the number of CA's */ ca_count = __hca_pfs_get_num_cas(); if (!ca_count) { osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 5231: " "Fail to get Any CA Ids.\n"); goto Exit; } /* * For each CA, retrieve the CA info attributes */ for (caIdx = 1; caIdx <= ca_count; caIdx++) { pfs_ca_info_t pfs_ca_info; if (__parse_ca_info_file(p_vend, caIdx, &pfs_ca_info) == IB_SUCCESS) { /* get all the ports info */ for (port_num = 1; port_num <= pfs_ca_info.num_ports; port_num++) { uint64_t port_guid; if (!__get_port_guid_from_port_gid_tbl (p_vend, caIdx, port_num, &port_guid)) { if (cl_hton64(port_guid) == guid) { osm_log(p_vend->p_log, OSM_LOG_DEBUG, "osm_vendor_get_guid_ca_and_port: " "Found Matching guid on HCA:%d Port:%d.\n", caIdx, port_num); strcpy(p_hca_id, pfs_ca_info.name); *p_port_num = port_num; *p_hca_idx = caIdx - 1; *p_hca_hndl = 0; status = IB_SUCCESS; goto Exit; } } } } } osm_log(p_vend->p_log, OSM_LOG_ERROR, "osm_vendor_get_guid_ca_and_port: ERR 5232: " "Fail to find HCA and Port for Port Guid 0x%" PRIx64 "\n", cl_ntoh64(guid)); status = IB_INVALID_GUID; Exit: OSM_LOG_EXIT(p_vend->p_log); return (status); }
static void __osm_lftr_rcv_by_comp_mask(IN cl_map_item_t * const p_map_item, IN void *context) { const osm_lftr_search_ctxt_t *const p_ctxt = (osm_lftr_search_ctxt_t *) context; const osm_switch_t *const p_sw = (osm_switch_t *) p_map_item; const ib_lft_record_t *const p_rcvd_rec = p_ctxt->p_rcvd_rec; osm_sa_t *sa = p_ctxt->sa; ib_net64_t const comp_mask = p_ctxt->comp_mask; const osm_physp_t *const p_req_physp = p_ctxt->p_req_physp; osm_port_t *p_port; uint16_t min_lid_ho, max_lid_ho; uint16_t min_block, max_block, block; const osm_physp_t *p_physp; /* In switches, the port guid is the node guid. */ p_port = osm_get_port_by_guid(sa->p_subn, p_sw->p_node->node_info.port_guid); if (!p_port) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4405: " "Failed to find Port by Node Guid:0x%016" PRIx64 "\n", cl_ntoh64(p_sw->p_node->node_info.node_guid)); return; } /* check that the requester physp and the current physp are under the same partition. */ p_physp = p_port->p_physp; if (!p_physp) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4406: " "Failed to find default physical Port by Node Guid:0x%016" PRIx64 "\n", cl_ntoh64(p_sw->p_node->node_info.node_guid)); return; } if (!osm_physp_share_pkey(sa->p_log, p_req_physp, p_physp)) return; /* get the port 0 of the switch */ osm_port_get_lid_range_ho(p_port, &min_lid_ho, &max_lid_ho); /* compare the lids - if required */ if (comp_mask & IB_LFTR_COMPMASK_LID) { OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Comparing lid:%u to port lid range: %u .. %u\n", cl_ntoh16(p_rcvd_rec->lid), min_lid_ho, max_lid_ho); /* ok we are ready for range check */ if (min_lid_ho > cl_ntoh16(p_rcvd_rec->lid) || max_lid_ho < cl_ntoh16(p_rcvd_rec->lid)) return; } /* now we need to decide which blocks to output */ max_block = osm_switch_get_max_block_id_in_use(p_sw); if (comp_mask & IB_LFTR_COMPMASK_BLOCK) { min_block = cl_ntoh16(p_rcvd_rec->block_num); if (min_block > max_block) return; max_block = min_block; } else /* use as many blocks as "in use" */ min_block = 0; /* so we can add these blocks one by one ... */ for (block = min_block; block <= max_block; block++) __osm_lftr_rcv_new_lftr(sa, p_sw, p_ctxt->p_list, osm_port_get_base_lid(p_port), block); }
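/*
 * Sketch of the block-range selection performed above, with hypothetical
 * names and example values: if the component mask selects a specific LFT
 * block, only that block is reported (and only if the switch actually uses
 * it); otherwise every block up to the highest block in use is reported.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_COMPMASK_BLOCK 0x1u	/* stand-in for IB_LFTR_COMPMASK_BLOCK */

int main(void)
{
	unsigned max_block_in_use = 5;	/* example: highest LFT block in use */
	unsigned requested_block = 3;
	uint32_t comp_mask = EXAMPLE_COMPMASK_BLOCK;
	unsigned min_block = 0, max_block = max_block_in_use, block;

	if (comp_mask & EXAMPLE_COMPMASK_BLOCK) {
		if (requested_block > max_block_in_use)
			return 0;	/* requested block out of range: report nothing */
		min_block = max_block = requested_block;
	}
	for (block = min_block; block <= max_block; block++)
		printf("emit LFT record for block %u\n", block);
	return 0;
}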
void osm_lr_rcv_process(IN void *context, IN void *data) { osm_sa_t *sa = context; osm_madw_t *p_madw = data; const ib_link_record_t *p_lr; const ib_sa_mad_t *p_sa_mad; const osm_port_t *p_src_port; const osm_port_t *p_dest_port; cl_qlist_t lr_list; ib_net16_t status; osm_physp_t *p_req_physp; OSM_LOG_ENTER(sa->p_log); CL_ASSERT(p_madw); p_sa_mad = osm_madw_get_sa_mad_ptr(p_madw); p_lr = ib_sa_mad_get_payload_ptr(p_sa_mad); CL_ASSERT(p_sa_mad->attr_id == IB_MAD_ATTR_LINK_RECORD); /* we only support SubnAdmGet and SubnAdmGetTable methods */ if (p_sa_mad->method != IB_MAD_METHOD_GET && p_sa_mad->method != IB_MAD_METHOD_GETTABLE) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 1804: " "Unsupported Method (%s)\n", ib_get_sa_method_str(p_sa_mad->method)); osm_sa_send_error(sa, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR); goto Exit; } /* update the requester physical port */ p_req_physp = osm_get_physp_by_mad_addr(sa->p_log, sa->p_subn, osm_madw_get_mad_addr_ptr (p_madw)); if (p_req_physp == NULL) { OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 1805: " "Cannot find requester physical port\n"); goto Exit; } if (OSM_LOG_IS_ACTIVE_V2(sa->p_log, OSM_LOG_DEBUG)) { OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Requester port GUID 0x%" PRIx64 "\n", cl_ntoh64(osm_physp_get_port_guid(p_req_physp))); osm_dump_link_record_v2(sa->p_log, p_lr, FILE_ID, OSM_LOG_DEBUG); } cl_qlist_init(&lr_list); /* Most SA functions (including this one) are read-only on the subnet object, so we grab the lock non-exclusively. */ cl_plock_acquire(sa->p_lock); status = lr_rcv_get_end_points(sa, p_madw, &p_src_port, &p_dest_port); if (status == IB_SA_MAD_STATUS_SUCCESS) lr_rcv_get_port_links(sa, p_lr, p_src_port, p_dest_port, p_sa_mad->comp_mask, &lr_list, p_req_physp); cl_plock_release(sa->p_lock); osm_sa_respond(sa, p_madw, sizeof(ib_link_record_t), &lr_list); Exit: OSM_LOG_EXIT(sa->p_log); }
osm_bind_handle_t osmv_bind_sa(IN osm_vendor_t * const p_vend, IN osm_mad_pool_t * const p_mad_pool, IN ib_net64_t port_guid) { osm_bind_info_t bind_info; osm_log_t *p_log = p_vend->p_log; ib_api_status_t status = IB_SUCCESS; osmv_sa_bind_info_t *p_sa_bind_info; cl_status_t cl_status; OSM_LOG_ENTER(p_log); osm_log(p_log, OSM_LOG_DEBUG, "osmv_bind_sa: " "Binding to port 0x%" PRIx64 ".\n", cl_ntoh64(port_guid)); bind_info.port_guid = port_guid; bind_info.mad_class = IB_MCLASS_SUBN_ADM; bind_info.class_version = 2; bind_info.is_responder = TRUE; bind_info.is_trap_processor = FALSE; bind_info.is_report_processor = TRUE; bind_info.send_q_size = 256; bind_info.recv_q_size = 256; /* allocate the new sa bind info */ p_sa_bind_info = (osmv_sa_bind_info_t *) malloc(sizeof(osmv_sa_bind_info_t)); if (!p_sa_bind_info) { osm_log(p_log, OSM_LOG_ERROR, "osmv_bind_sa: ERR 0505: " "Failed to allocate the new bind structure\n"); p_sa_bind_info = OSM_BIND_INVALID_HANDLE; goto Exit; } /* store some important context */ p_sa_bind_info->p_log = p_log; p_sa_bind_info->port_guid = port_guid; p_sa_bind_info->p_mad_pool = p_mad_pool; p_sa_bind_info->p_vendor = p_vend; p_sa_bind_info->last_lids_update_sec = 0; /* Bind to the lower level */ p_sa_bind_info->h_bind = osm_vendor_bind(p_vend, &bind_info, p_mad_pool, __osmv_sa_mad_rcv_cb, __osmv_sa_mad_err_cb, p_sa_bind_info); /* context provided to CBs */ if (p_sa_bind_info->h_bind == OSM_BIND_INVALID_HANDLE) { free(p_sa_bind_info); p_sa_bind_info = OSM_BIND_INVALID_HANDLE; osm_log(p_log, OSM_LOG_ERROR, "osmv_bind_sa: ERR 0506: " "Failed to bind to the vendor MAD interface.\n"); goto Exit; } /* obtain the sm_lid from the vendor */ status = __osmv_get_lid_and_sm_lid_by_port_guid(p_vend, port_guid, &p_sa_bind_info-> last_lids_update_sec, &p_sa_bind_info->lid, &p_sa_bind_info->sm_lid); if (status != IB_SUCCESS) { free(p_sa_bind_info); p_sa_bind_info = OSM_BIND_INVALID_HANDLE; osm_log(p_log, OSM_LOG_ERROR, "osmv_bind_sa: ERR 0507: " "Failed to obtain the SM LID.\n"); goto Exit; } /* initialize the sync_event */ cl_event_construct(&p_sa_bind_info->sync_event); cl_status = cl_event_init(&p_sa_bind_info->sync_event, TRUE); if (cl_status != CL_SUCCESS) { osm_log(p_log, OSM_LOG_ERROR, "osmv_bind_sa: ERR 0508: " "cl_event_init failed: %s\n", ib_get_err_str(cl_status) ); free(p_sa_bind_info); p_sa_bind_info = OSM_BIND_INVALID_HANDLE; } Exit: OSM_LOG_EXIT(p_log); return (p_sa_bind_info); }
static void lr_rcv_get_physp_link(IN osm_sa_t * sa, IN const ib_link_record_t * p_lr, IN const osm_physp_t * p_src_physp, IN const osm_physp_t * p_dest_physp, IN const ib_net64_t comp_mask, IN cl_qlist_t * p_list, IN const osm_physp_t * p_req_physp) { uint8_t src_port_num; uint8_t dest_port_num; ib_net16_t from_base_lid; ib_net16_t to_base_lid; ib_net16_t lmc_mask; OSM_LOG_ENTER(sa->p_log); /* If only one end of the link is specified, determine the other side. */ if (p_src_physp) { if (p_dest_physp) { /* Ensure the two physp's are actually connected. If not, bail out. */ if (osm_physp_get_remote(p_src_physp) != p_dest_physp) goto Exit; } else { p_dest_physp = osm_physp_get_remote(p_src_physp); if (p_dest_physp == NULL) goto Exit; } } else { if (p_dest_physp) { p_src_physp = osm_physp_get_remote(p_dest_physp); if (p_src_physp == NULL) goto Exit; } else goto Exit; /* no physp's, so nothing to do */ } /* Check that the p_src_physp, p_dest_physp and p_req_physp all share a pkey (doesn't have to be the same p_key). */ if (!osm_physp_share_pkey(sa->p_log, p_src_physp, p_dest_physp, sa->p_subn->opt.allow_both_pkeys)) { OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Source and Dest PhysPorts do not share PKey\n"); goto Exit; } if (!osm_physp_share_pkey(sa->p_log, p_src_physp, p_req_physp, sa->p_subn->opt.allow_both_pkeys)) { OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Source and Requester PhysPorts do not share PKey\n"); goto Exit; } if (!osm_physp_share_pkey(sa->p_log, p_req_physp, p_dest_physp, sa->p_subn->opt.allow_both_pkeys)) { OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Requester and Dest PhysPorts do not share PKey\n"); goto Exit; } src_port_num = osm_physp_get_port_num(p_src_physp); dest_port_num = osm_physp_get_port_num(p_dest_physp); if (comp_mask & IB_LR_COMPMASK_FROM_PORT) if (src_port_num != p_lr->from_port_num) goto Exit; if (comp_mask & IB_LR_COMPMASK_TO_PORT) if (dest_port_num != p_lr->to_port_num) goto Exit; from_base_lid = get_base_lid(p_src_physp); to_base_lid = get_base_lid(p_dest_physp); lmc_mask = ~((1 << sa->p_subn->opt.lmc) - 1); lmc_mask = cl_hton16(lmc_mask); if (comp_mask & IB_LR_COMPMASK_FROM_LID) if (from_base_lid != (p_lr->from_lid & lmc_mask)) goto Exit; if (comp_mask & IB_LR_COMPMASK_TO_LID) if (to_base_lid != (p_lr->to_lid & lmc_mask)) goto Exit; OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Acquiring link record\n" "\t\t\t\tsrc port 0x%" PRIx64 " (port %u)" ", dest port 0x%" PRIx64 " (port %u)\n", cl_ntoh64(osm_physp_get_port_guid(p_src_physp)), src_port_num, cl_ntoh64(osm_physp_get_port_guid(p_dest_physp)), dest_port_num); lr_rcv_build_physp_link(sa, from_base_lid, to_base_lid, src_port_num, dest_port_num, p_list); Exit: OSM_LOG_EXIT(sa->p_log); }
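/*
 * Standalone sketch of the LMC masking above, using host-order example
 * values (the routine itself compares network-order LIDs): clearing the lmc
 * low-order bits of any LID assigned to a port yields the port's base LID,
 * which is what the FROM/TO LID comparison relies on.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t lmc = 2;		/* example subnet LMC */
	uint16_t base_lid = 0x10;	/* example base LID of the port */
	uint16_t queried_lid = 0x12;	/* one of the port's 2^lmc LIDs */
	uint16_t lmc_mask = (uint16_t)~((1 << lmc) - 1);

	if ((queried_lid & lmc_mask) == base_lid)
		printf("LID 0x%X maps back to base LID 0x%X\n",
		       (unsigned)queried_lid, (unsigned)base_lid);
	return 0;
}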
/********************************************************************** * Send SubnSet(SMInfo) SMP with HANDOVER attribute to the * remote_sm indicated. **********************************************************************/ static void state_mgr_send_handover(IN osm_sm_t * sm, IN osm_remote_sm_t * p_sm) { uint8_t payload[IB_SMP_DATA_SIZE]; ib_sm_info_t *p_smi = (ib_sm_info_t *) payload; osm_madw_context_t context; const osm_port_t *p_port; ib_api_status_t status; OSM_LOG_ENTER(sm->p_log); /* * Send a query of SubnSet(SMInfo) HANDOVER to the remote sm given. */ memset(&context, 0, sizeof(context)); p_port = p_sm->p_port; if (p_port == NULL) { OSM_LOG(sm->p_log, OSM_LOG_ERROR, "ERR 3316: " "No port object on given remote_sm object\n"); goto Exit; } /* update the master_guid in the sm_state_mgr object according to */ /* the guid of the port where the new Master SM should reside. */ OSM_LOG(sm->p_log, OSM_LOG_VERBOSE, "Handing over mastership. Updating sm_state_mgr master_guid: %016" PRIx64 " (node %s)\n", cl_ntoh64(p_port->guid), p_port->p_node ? p_port->p_node->print_desc : "UNKNOWN"); sm->master_sm_guid = p_port->guid; context.smi_context.port_guid = p_port->guid; context.smi_context.set_method = TRUE; p_smi->guid = sm->p_subn->sm_port_guid; p_smi->act_count = cl_hton32(sm->p_subn->p_osm->stats.qp0_mads_sent); p_smi->pri_state = (uint8_t) (sm->p_subn->sm_state | sm->p_subn->opt.sm_priority << 4); /* * Return 0 for the SM key unless we authenticate the requester * as the master SM. */ if (ib_sminfo_get_state(&p_sm->smi) == IB_SMINFO_STATE_MASTER) { OSM_LOG(sm->p_log, OSM_LOG_DEBUG, "Responding to master SM with real sm_key\n"); p_smi->sm_key = sm->p_subn->opt.sm_key; } else { /* The requester is not authenticated as master - set sm_key to zero */ OSM_LOG(sm->p_log, OSM_LOG_DEBUG, "Responding to SM not master with zero sm_key\n"); p_smi->sm_key = 0; } status = osm_req_set(sm, osm_physp_get_dr_path_ptr(p_port->p_physp), payload, sizeof(payload), IB_MAD_ATTR_SM_INFO, IB_SMINFO_ATTR_MOD_HANDOVER, CL_DISP_MSGID_NONE, &context); if (status != IB_SUCCESS) OSM_LOG(sm->p_log, OSM_LOG_ERROR, "ERR 3317: " "Failure requesting SMInfo (%s)\n", ib_get_err_str(status)); Exit: OSM_LOG_EXIT(sm->p_log); }
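/*
 * Bit-packing sketch for the pri_state byte assembled above: the low nibble
 * carries the SM state and the high nibble the configured priority.  The
 * state and priority values here are arbitrary examples, not values taken
 * from a real subnet.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t sm_state = 3;		/* example state value */
	uint8_t sm_priority = 14;	/* example priority, 0..15 */
	uint8_t pri_state = (uint8_t)(sm_state | (sm_priority << 4));

	printf("pri_state = 0x%02X (priority %u, state %u)\n",
	       (unsigned)pri_state, (unsigned)(pri_state >> 4),
	       (unsigned)(pri_state & 0x0F));
	return 0;
}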
static void populate_fwd_tbls(lash_t * p_lash) { osm_log_t *p_log = &p_lash->p_osm->log; osm_subn_t *p_subn = &p_lash->p_osm->subn; osm_switch_t *p_sw, *p_next_sw, *p_dst_sw; osm_port_t *port; uint16_t max_lid_ho, lid; OSM_LOG_ENTER(p_log); p_next_sw = (osm_switch_t *) cl_qmap_head(&p_subn->sw_guid_tbl); /* Go through each switch individually */ while (p_next_sw != (osm_switch_t *) cl_qmap_end(&p_subn->sw_guid_tbl)) { uint64_t current_guid; switch_t *sw; p_sw = p_next_sw; p_next_sw = (osm_switch_t *) cl_qmap_next(&p_sw->map_item); max_lid_ho = p_sw->max_lid_ho; current_guid = p_sw->p_node->node_info.port_guid; sw = p_sw->priv; memset(p_sw->new_lft, OSM_NO_PATH, p_sw->lft_size); for (lid = 1; lid <= max_lid_ho; lid++) { port = osm_get_port_by_lid_ho(p_subn, lid); if (!port) continue; p_dst_sw = get_osm_switch_from_port(port); if (p_dst_sw == p_sw) { uint8_t egress_port = port->p_node->sw ? 0 : port->p_physp->p_remote_physp->port_num; p_sw->new_lft[lid] = egress_port; OSM_LOG(p_log, OSM_LOG_VERBOSE, "LASH fwd MY SRC GUID 0x%016" PRIx64 " src lash id (%d), src lid no (%u) src lash port (%d) " "DST GUID 0x%016" PRIx64 " dst lash id (%d), egress port (%d)\n", cl_ntoh64(current_guid), -1, lid, egress_port, cl_ntoh64(current_guid), -1, egress_port); } else if (p_dst_sw) { unsigned dst_lash_switch_id = get_lash_id(p_dst_sw); uint8_t lash_egress_port = (uint8_t) sw-> routing_table[dst_lash_switch_id].out_link; uint8_t physical_egress_port = get_next_port(sw, lash_egress_port); p_sw->new_lft[lid] = physical_egress_port; OSM_LOG(p_log, OSM_LOG_VERBOSE, "LASH fwd SRC GUID 0x%016" PRIx64 " src lash id (%d), " "src lid no (%u) src lash port (%d) " "DST GUID 0x%016" PRIx64 " dst lash id (%d), egress port (%d)\n", cl_ntoh64(current_guid), sw->id, lid, lash_egress_port, cl_ntoh64(p_dst_sw->p_node->node_info. port_guid), dst_lash_switch_id, physical_egress_port); } } /* for */ } OSM_LOG_EXIT(p_log); }
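/*
 * Minimal sketch of the structure the loop above fills in: a linear
 * forwarding table indexed by destination LID whose entries are egress port
 * numbers, with a reserved value marking unreachable LIDs.  The table size,
 * LIDs and port numbers below are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EXAMPLE_NO_PATH		0xFF
#define EXAMPLE_LFT_SIZE	64

int main(void)
{
	uint8_t lft[EXAMPLE_LFT_SIZE];

	memset(lft, EXAMPLE_NO_PATH, sizeof(lft));
	lft[1] = 0;	/* LID 1 terminates on this switch: management port 0 */
	lft[5] = 3;	/* LID 5 is reached through physical port 3 */
	printf("LID 5 -> port %u, LID 9 -> %s\n", (unsigned)lft[5],
	       lft[9] == EXAMPLE_NO_PATH ? "no path" : "routed");
	return 0;
}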