/*
 * Destroy a umadt vendor object: unbind every registered handle, unload
 * the umadt shared library, free the object and NULL the caller's pointer.
 *
 * pp_vend - in/out: address of the vendor pointer; set to NULL on return.
 *
 * BUG FIX: the original called OSM_LOG_EXIT(p_umadt_obj->p_log) AFTER
 * free(p_umadt_obj) — a use-after-free.  The exit log is now emitted
 * before the object is torn down.
 */
void osm_vendor_delete(IN osm_vendor_t ** const pp_vend)
{
	umadt_obj_t *p_umadt_obj = (umadt_obj_t *) * pp_vend;
	cl_list_item_t *p_list_item;
	uint32_t count, i;
	mad_bind_info_t *p_mad_bind_info;

	OSM_LOG_ENTER(p_umadt_obj->p_log);

	/* Snapshot the head and count under the lock; the list is walked
	   item by item below because unbind mutates it. */
	cl_spinlock_acquire(&p_umadt_obj->register_lock);
	p_mad_bind_info =
	    (mad_bind_info_t *) cl_qlist_head(&p_umadt_obj->register_list);
	count = cl_qlist_count(&p_umadt_obj->register_list);
	cl_spinlock_release(&p_umadt_obj->register_lock);

	for (i = 0; i < count; i++) {
		/* Fetch the successor before unbinding the current item. */
		cl_spinlock_acquire(&p_umadt_obj->register_lock);
		p_list_item = cl_qlist_next(&p_mad_bind_info->list_item);
		cl_spinlock_release(&p_umadt_obj->register_lock);
		/* Unbind this handle */
		/* osm_vendor_unbind also removes the item from the list */
		/* osm_vendor_unbind takes the list lock so release it here */
		osm_vendor_unbind((osm_bind_handle_t) p_mad_bind_info);
		p_mad_bind_info = (mad_bind_info_t *) p_list_item;
	}

	/* Log exit while the log object is still valid (see BUG FIX above). */
	OSM_LOG_EXIT(p_umadt_obj->p_log);

	dlclose(p_umadt_obj->umadt_handle);
	free(p_umadt_obj);
	*pp_vend = NULL;
}
/********************************************************************
   __cl_disp_worker

   Description:
	This function takes messages off the FIFO and calls Processmsg()
	This function executes at passive level.

	Lock protocol: p_disp->lock is held while touching the FIFO and
	the message pool, and deliberately dropped around the user
	callbacks — a callback may reenter the dispatcher and reacquire
	the lock itself.

   Inputs:
	p_disp - Pointer to Dispatcher object (passed as void *context)

   Outputs:
	None

   Returns:
	None
********************************************************************/
void __cl_disp_worker(IN void *context)
{
	cl_disp_msg_t *p_msg;
	cl_dispatcher_t *p_disp = (cl_dispatcher_t *) context;

	cl_spinlock_acquire(&p_disp->lock);

	/* Process the FIFO until we drain it dry. */
	while (cl_qlist_count(&p_disp->msg_fifo)) {
		/* Pop the message at the head from the FIFO. */
		p_msg =
		    (cl_disp_msg_t *) cl_qlist_remove_head(&p_disp->msg_fifo);

		/* we track the time the last message spent in the queue */
		p_disp->last_msg_queue_time_us =
		    cl_get_time_stamp() - p_msg->in_time;

		/*
		 * Release the spinlock while the message is processed.
		 * The user's callback may reenter the dispatcher
		 * and cause the lock to be reacquired.
		 */
		cl_spinlock_release(&p_disp->lock);
		p_msg->p_dest_reg->pfn_rcv_callback((void *)p_msg->p_dest_reg->
						    context,
						    (void *)p_msg->p_data);

		/* Receiver is done with the message; drop its pin on the
		   destination registration. */
		cl_atomic_dec(&p_msg->p_dest_reg->ref_cnt);

		/* The client has seen the data.  Notify the sender as
		   appropriate. */
		if (p_msg->pfn_xmt_callback) {
			p_msg->pfn_xmt_callback((void *)p_msg->context,
						(void *)p_msg->p_data);
			cl_atomic_dec(&p_msg->p_src_reg->ref_cnt);
		}

		/* Grab the lock for the next iteration through the list. */
		cl_spinlock_acquire(&p_disp->lock);

		/* Return this message to the pool. */
		cl_qpool_put(&p_disp->msg_pool, (cl_pool_item_t *) p_msg);
	}

	cl_spinlock_release(&p_disp->lock);
}
/* Debug helper: print every registration held by the event wheel,
 * first by walking the ordered wheel list, then the keyed lookup map.
 * Both views print the same per-event line (key, context, reg count). */
void __cl_event_wheel_dump(IN cl_event_wheel_t * const p_event_wheel)
{
	cl_list_item_t *p_item;
	cl_map_item_t *p_mitem;
	cl_event_wheel_reg_info_t *p_reg;

	printf("************** Event Wheel Dump ***********************\n");

	printf("Event Wheel List has %u items:\n",
	       cl_qlist_count(&p_event_wheel->events_wheel));
	for (p_item = cl_qlist_head(&p_event_wheel->events_wheel);
	     p_item != cl_qlist_end(&p_event_wheel->events_wheel);
	     p_item = cl_qlist_next(p_item)) {
		p_reg = PARENT_STRUCT(p_item, cl_event_wheel_reg_info_t,
				      list_item);
		printf("Event key:0x%" PRIx64 " Context:%s NumRegs:%u\n",
		       p_reg->key, (char *)p_reg->context, p_reg->num_regs);
	}

	printf("Event Map has %u items:\n",
	       cl_qmap_count(&p_event_wheel->events_map));
	for (p_mitem = cl_qmap_head(&p_event_wheel->events_map);
	     p_mitem != cl_qmap_end(&p_event_wheel->events_map);
	     p_mitem = cl_qmap_next(p_mitem)) {
		p_reg = PARENT_STRUCT(p_mitem, cl_event_wheel_reg_info_t,
				      map_item);
		printf("Event key:0x%" PRIx64 " Context:%s NumRegs:%u\n",
		       p_reg->key, (char *)p_reg->context, p_reg->num_regs);
	}
}
/*
 * Build and send the response to an SA query.
 *
 * sa        - the SA object (log, mad pool, sender).
 * madw      - the received request MAD wrapper; its address is reused
 *             for the reply.
 * attr_size - size in bytes of one result record.
 * list      - queue of struct item_data results; ALL items are freed by
 *             this function (sent or not) before it returns.
 *
 * Error statuses (too many records for a Get, no records, no resources)
 * are reported to the requester via osm_sa_send_error().
 */
void osm_sa_respond(osm_sa_t *sa, osm_madw_t *madw, size_t attr_size,
		    cl_qlist_t *list)
{
	/* Each queued result is a list header followed by the raw record. */
	struct item_data {
		cl_list_item_t list;
		char data[0];
	};
	cl_list_item_t *item;
	osm_madw_t *resp_madw;
	ib_sa_mad_t *sa_mad, *resp_sa_mad;
	unsigned num_rec, i;
#ifndef VENDOR_RMPP_SUPPORT
	unsigned trim_num_rec;
#endif
	unsigned char *p;

	sa_mad = osm_madw_get_sa_mad_ptr(madw);
	num_rec = cl_qlist_count(list);

	/*
	 * C15-0.1.30:
	 * If we do a SubnAdmGet and got more than one record it is an error!
	 */
	if (sa_mad->method == IB_MAD_METHOD_GET && num_rec > 1) {
		OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4C05: "
			"Got %u records for SubnAdmGet(%s) comp_mask 0x%016"
			PRIx64 "\n", num_rec,
			ib_get_sa_attr_str(sa_mad->attr_id),
			cl_ntoh64(sa_mad->comp_mask));
		osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_TOO_MANY_RECORDS);
		goto Exit;
	}

#ifndef VENDOR_RMPP_SUPPORT
	/* Without RMPP everything must fit in a single MAD: trim the
	   result set to what one payload can hold. */
	trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / attr_size;
	if (trim_num_rec < num_rec) {
		OSM_LOG(sa->p_log, OSM_LOG_VERBOSE,
			"Number of records:%u trimmed to:%u to fit in one MAD\n",
			num_rec, trim_num_rec);
		num_rec = trim_num_rec;
	}
#endif

	OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Returning %u records\n", num_rec);

	/* An empty answer to SubnAdmGet is reported as NO_RECORDS. */
	if (sa_mad->method == IB_MAD_METHOD_GET && num_rec == 0) {
		osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_NO_RECORDS);
		goto Exit;
	}

	/*
	 * Get a MAD to reply. Address of Mad is in the received mad_wrapper
	 */
	resp_madw = osm_mad_pool_get(sa->p_mad_pool, madw->h_bind,
				     num_rec * attr_size + IB_SA_MAD_HDR_SIZE,
				     &madw->mad_addr);
	if (!resp_madw) {
		OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4C06: "
			"osm_mad_pool_get failed\n");
		osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_NO_RESOURCES);
		goto Exit;
	}

	resp_sa_mad = osm_madw_get_sa_mad_ptr(resp_madw);

	/*
	   Copy the MAD header back into the response mad.
	   Set the 'R' bit and the payload length,
	   Then copy all records from the list into the response payload.
	 */
	memcpy(resp_sa_mad, sa_mad, IB_SA_MAD_HDR_SIZE);
	/* A Set request is answered as a GetResp. */
	if (resp_sa_mad->method == IB_MAD_METHOD_SET)
		resp_sa_mad->method = IB_MAD_METHOD_GET;
	resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
	/* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */
	resp_sa_mad->sm_key = 0;

	/* Fill in the offset (paylen will be done by the rmpp SAR) */
	resp_sa_mad->attr_offset = num_rec ? ib_get_attr_offset(attr_size) : 0;

	p = ib_sa_mad_get_payload_ptr(resp_sa_mad);

#ifndef VENDOR_RMPP_SUPPORT
	/* we support only one packet RMPP - so we will set the first and
	   last flags for gettable */
	if (resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) {
		resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA;
		resp_sa_mad->rmpp_flags =
		    IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST |
		    IB_RMPP_FLAG_ACTIVE;
	}
#else
	/* forcefully define the packet as RMPP one */
	if (resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP)
		resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
#endif

	/* Copy the (possibly trimmed) records into the payload, freeing
	   each list item as it is consumed. */
	for (i = 0; i < num_rec; i++) {
		item = cl_qlist_remove_head(list);
		memcpy(p, ((struct item_data *)item)->data, attr_size);
		p += attr_size;
		free(item);
	}

	osm_dump_sa_mad(sa->p_log, resp_sa_mad, OSM_LOG_FRAMES);
	osm_sa_send(sa, resp_madw, FALSE);

Exit:
	/* Free any result records not consumed above (error paths, or
	   records trimmed off the reply). */
	item = cl_qlist_remove_head(list);
	while (item != cl_qlist_end(list)) {
		free(item);
		item = cl_qlist_remove_head(list);
	}
}
/*
 * Configure QoS on every port in the subnet: build per-port-type QoS
 * configs (CA, switch port 0, switch external ports, router), generate
 * the SL2VL / VLArb MADs for each port, then send them interleaved
 * across ports (one MAD per port per pass) under the subnet lock.
 *
 * Returns 0 on success, -1 if any port setup failed or memory ran out.
 *
 * BUG FIX: on malloc failure the original did `return -1` while still
 * holding p_osm->lock (taken with cl_plock_excl_acquire), deadlocking
 * every later locker, and skipped OSM_LOG_EXIT.  The failure path now
 * goes through a common unlock exit.
 */
int osm_qos_setup(osm_opensm_t * p_osm)
{
	struct qos_config ca_config, sw0_config, swe_config, rtr_config;
	struct qos_config *cfg;
	cl_qmap_t *p_tbl;
	cl_map_item_t *p_next;
	osm_port_t *p_port;
	osm_node_t *p_node;
	int ret = 0;
	int vlarb_only;
	qos_mad_list_t *p_list, *p_list_next;
	qos_mad_item_t *p_port_mad;
	cl_qlist_t qos_mad_list;

	if (!p_osm->subn.opt.qos)
		return 0;

	OSM_LOG_ENTER(&p_osm->log);

	/* Per-port-type options fall back to the generic qos_options. */
	qos_build_config(&ca_config, &p_osm->subn.opt.qos_ca_options,
			 &p_osm->subn.opt.qos_options);
	qos_build_config(&sw0_config, &p_osm->subn.opt.qos_sw0_options,
			 &p_osm->subn.opt.qos_options);
	qos_build_config(&swe_config, &p_osm->subn.opt.qos_swe_options,
			 &p_osm->subn.opt.qos_options);
	qos_build_config(&rtr_config, &p_osm->subn.opt.qos_rtr_options,
			 &p_osm->subn.opt.qos_options);

	cl_qlist_init(&qos_mad_list);

	cl_plock_excl_acquire(&p_osm->lock);

	/* read QoS policy config file */
	osm_qos_parse_policy_file(&p_osm->subn);

	/* Phase 1: build the MAD list for every port. */
	p_tbl = &p_osm->subn.port_guid_tbl;
	p_next = cl_qmap_head(p_tbl);
	while (p_next != cl_qmap_end(p_tbl)) {
		vlarb_only = 0;
		p_port = (osm_port_t *) p_next;
		p_next = cl_qmap_next(p_next);

		p_list = (qos_mad_list_t *) calloc(1, sizeof(*p_list));
		if (!p_list) {
			/* Fail without holding the lock (see BUG FIX). */
			ret = -1;
			goto Unlock;
		}

		cl_qlist_init(&p_list->port_mad_list);

		p_node = p_port->p_node;
		if (p_node->sw) {
			if (qos_extports_setup(&p_osm->sm, p_node, &swe_config,
					       &p_list->port_mad_list))
				ret = -1;

			/* skip base port 0 */
			if (!ib_switch_info_is_enhanced_port0
			    (&p_node->sw->switch_info))
				goto Continue;

			/* If the switch supports the optimized SL2VL
			   mapping and port 0 uses the same table as the
			   external ports, only VLArb needs programming. */
			if (ib_switch_info_get_opt_sl2vlmapping
			    (&p_node->sw->switch_info)
			    && p_osm->sm.p_subn->opt.use_optimized_slvl
			    && !memcmp(&swe_config.sl2vl, &sw0_config.sl2vl,
				       sizeof(swe_config.sl2vl)))
				vlarb_only = 1;

			cfg = &sw0_config;
		} else if (osm_node_get_type(p_node) == IB_NODE_TYPE_ROUTER)
			cfg = &rtr_config;
		else
			cfg = &ca_config;

		if (qos_endport_setup(&p_osm->sm, p_port->p_physp, cfg,
				      vlarb_only, &p_list->port_mad_list))
			ret = -1;
Continue:
		/* if MAD list is not empty, add it to the global MAD list */
		if (cl_qlist_count(&p_list->port_mad_list)) {
			cl_qlist_insert_tail(&qos_mad_list,
					     &p_list->list_item);
		} else {
			free(p_list);
		}
	}

	/* Phase 2: send one MAD per port per pass until all lists drain. */
	while (cl_qlist_count(&qos_mad_list)) {
		p_list_next = (qos_mad_list_t *) cl_qlist_head(&qos_mad_list);
		while (p_list_next !=
		       (qos_mad_list_t *) cl_qlist_end(&qos_mad_list)) {
			p_list = p_list_next;
			p_list_next = (qos_mad_list_t *)
			    cl_qlist_next(&p_list->list_item);
			/* next MAD to send */
			p_port_mad = (qos_mad_item_t *)
			    cl_qlist_remove_head(&p_list->port_mad_list);
			osm_send_req_mad(&p_osm->sm, p_port_mad->p_madw);
			osm_qos_mad_delete(&p_port_mad);
			/* remove the QoS MAD from global MAD list */
			if (cl_qlist_count(&p_list->port_mad_list) == 0) {
				cl_qlist_remove_item(&qos_mad_list,
						     &p_list->list_item);
				free(p_list);
			}
		}
	}

Unlock:
	cl_plock_release(&p_osm->lock);
	OSM_LOG_EXIT(&p_osm->log);
	return ret;
}
/*
 * Thread entry point for the VL15 (SMP) transmitter.
 *
 * Drains the unicast FIFO (priority: someone is waiting for a timely
 * response) and then the routed FIFO, sending one MAD at a time, and
 * sleeps on p_vl->signal when both are empty.  While the number of
 * SMPs outstanding on the wire is at the current window (max_smps),
 * waits with a timeout; on timeout the window is grown by one, up to
 * max_wire_smps2, and reset to max_wire_smps after a normal wakeup.
 *
 * p_ptr - the osm_vl15_t object (passed as void * by the thread API).
 */
static void vl15_poller(IN void *p_ptr)
{
	ib_api_status_t status;
	osm_madw_t *p_madw;
	osm_vl15_t *p_vl = p_ptr;
	cl_qlist_t *p_fifo;
	int32_t max_smps = p_vl->max_wire_smps;
	int32_t max_smps2 = p_vl->max_wire_smps2;

	OSM_LOG_ENTER(p_vl->p_log);

	if (p_vl->thread_state == OSM_THREAD_STATE_NONE)
		p_vl->thread_state = OSM_THREAD_STATE_RUN;

	while (p_vl->thread_state == OSM_THREAD_STATE_RUN) {
		/*
		   Start servicing the FIFOs by pulling off MAD wrappers
		   and passing them to the transport interface.
		   There are lots of corner cases here so tread carefully.

		   The unicast FIFO has priority, since somebody is waiting
		   for a timely response.
		 */
		cl_spinlock_acquire(&p_vl->lock);

		if (cl_qlist_count(&p_vl->ufifo) != 0)
			p_fifo = &p_vl->ufifo;
		else
			p_fifo = &p_vl->rfifo;

		p_madw = (osm_madw_t *) cl_qlist_remove_head(p_fifo);

		cl_spinlock_release(&p_vl->lock);

		/* cl_qlist_remove_head returns the list end when empty. */
		if (p_madw != (osm_madw_t *) cl_qlist_end(p_fifo)) {
			OSM_LOG(p_vl->p_log, OSM_LOG_DEBUG,
				"Servicing p_madw = %p\n", p_madw);
			if (osm_log_is_active(p_vl->p_log, OSM_LOG_FRAMES))
				osm_dump_dr_smp(p_vl->p_log,
						osm_madw_get_smp_ptr(p_madw),
						OSM_LOG_FRAMES);

			vl15_send_mad(p_vl, p_madw);
		} else
			/* The VL15 FIFO is empty, so we have nothing left
			   to do. */
			status = cl_event_wait_on(&p_vl->signal,
						  EVENT_NO_TIMEOUT, TRUE);

		/* Throttle: wait while the wire window is full.  On a
		   timeout the window is widened one step (toward
		   max_smps2); on a normal signal it is reset below. */
		while (p_vl->p_stats->qp0_mads_outstanding_on_wire >=
		       max_smps
		       && p_vl->thread_state == OSM_THREAD_STATE_RUN) {
			status = cl_event_wait_on(&p_vl->signal,
						  p_vl->max_smps_timeout,
						  TRUE);
			if (status == CL_TIMEOUT) {
				if (max_smps < max_smps2)
					max_smps++;
				break;
			} else if (status != CL_SUCCESS) {
				OSM_LOG(p_vl->p_log, OSM_LOG_ERROR,
					"ERR 3E02: "
					"Event wait failed (%s)\n",
					CL_STATUS_MSG(status));
				break;
			}
			max_smps = p_vl->max_wire_smps;
		}
	}

	/*
	   since we abort immediately when the state != OSM_THREAD_STATE_RUN
	   we might have some mads on the queues. After the thread exits
	   the vl15 destroy routine should put these mads back...
	 */

	OSM_LOG_EXIT(p_vl->p_log);
}