Example No. 1
void osm_transaction_mgr_destroy(IN osm_vendor_t * const p_vend)
{
	osm_transaction_mgr_t *trans_mgr_p;
	cl_list_item_t *p_list_item;
	cl_map_item_t *p_map_item;
	osm_madw_req_t *osm_madw_req_p;

	OSM_LOG_ENTER(p_vend->p_log);

	trans_mgr_p = (osm_transaction_mgr_t *) p_vend->p_transaction_mgr;

	if (p_vend->p_transaction_mgr != NULL) {
		/* we need to get a lock */
		cl_spinlock_acquire(&trans_mgr_p->transaction_mgr_lock);

		/* go over all the items in the list and remove them */
		p_list_item =
		    cl_qlist_remove_head(trans_mgr_p->madw_reqs_list_p);
		while (p_list_item !=
		       cl_qlist_end(trans_mgr_p->madw_reqs_list_p)) {
			osm_madw_req_p = (osm_madw_req_t *) p_list_item;

			if (osm_madw_req_p->p_madw->p_mad)
				osm_log(p_vend->p_log, OSM_LOG_DEBUG,
					"osm_transaction_mgr_destroy: "
					"Found outstanding MADW:%p  TID:<0x%"
					PRIx64 ">.\n", osm_madw_req_p->p_madw,
					osm_madw_req_p->p_madw->p_mad->
					trans_id);
			else
				osm_log(p_vend->p_log, OSM_LOG_DEBUG,
					"osm_transaction_mgr_destroy: "
					"Found outstanding MADW:%p  TID:UNDEFINED.\n",
					osm_madw_req_p->p_madw);

			/*  each item - remove it from the map */
			p_map_item = &(osm_madw_req_p->map_item);
			cl_qmap_remove_item(trans_mgr_p->madw_by_tid_map_p,
					    p_map_item);
			/*  free the item */
			free(osm_madw_req_p);
			p_list_item =
			    cl_qlist_remove_head(trans_mgr_p->madw_reqs_list_p);
		}
		/*  free the qlist and qmap */
		free(trans_mgr_p->madw_reqs_list_p);
		free(trans_mgr_p->madw_by_tid_map_p);
		/*  release and destroy the lock */
		cl_spinlock_release(&trans_mgr_p->transaction_mgr_lock);
		cl_spinlock_destroy(&(trans_mgr_p->transaction_mgr_lock));
		/*  destroy the timer */
		cl_timer_trim(&trans_mgr_p->madw_list_timer, 1);
		cl_timer_destroy(&trans_mgr_p->madw_list_timer);
		/*  free the transaction_mgr object and clear the vendor's pointer */
		free(trans_mgr_p);
		p_vend->p_transaction_mgr = NULL;
	}

	OSM_LOG_EXIT(p_vend->p_log);
}
Example No. 2
cl_pool_item_t *cl_qcpool_get(IN cl_qcpool_t * const p_pool)
{
	cl_list_item_t *p_list_item;

	CL_ASSERT(p_pool);
	CL_ASSERT(p_pool->state == CL_INITIALIZED);

	if (cl_is_qlist_empty(&p_pool->free_list)) {
		/*
		 * No object is available.
		 * Return NULL if the user does not want automatic growth.
		 */
		if (!p_pool->grow_size)
			return (NULL);

		/* We ran out of elements.  Get more */
		cl_qcpool_grow(p_pool, p_pool->grow_size);
		/*
		 * We may not have gotten everything we wanted but we might have
		 * gotten something.
		 */
		if (cl_is_qlist_empty(&p_pool->free_list))
			return (NULL);
	}

	p_list_item = cl_qlist_remove_head(&p_pool->free_list);
	/* OK, at this point we have an object */
	CL_ASSERT(p_list_item != cl_qlist_end(&p_pool->free_list));
	return ((cl_pool_item_t *) p_list_item);
}
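Once a caller is done with an object obtained this way it has to hand it back, otherwise the destroy-time assert in cl_qcpool_destroy (Example No. 5 below) will fire. A minimal sketch of the get/put pairing, assuming complib's cl_qcomppool.h header and its cl_qcpool_put() counterpart; my_pool and my_object_t are illustrative names, not part of the code above:

#include <complib/cl_qcomppool.h>

/* Illustrative object type; the pool item must be the first member so the
 * cast from cl_pool_item_t back to the object is valid. */
typedef struct my_object {
	cl_pool_item_t pool_item;
	int payload;
} my_object_t;

static cl_qcpool_t my_pool;	/* assumed to be initialized elsewhere */

static void use_one_object(void)
{
	cl_pool_item_t *p_item;
	my_object_t *p_obj;

	p_item = cl_qcpool_get(&my_pool);
	if (!p_item)
		return;		/* pool is empty and could not (or may not) grow */

	p_obj = (my_object_t *) p_item;
	p_obj->payload = 42;

	/* hand the object back so the pool can reuse or destroy it later */
	cl_qcpool_put(&my_pool, p_item);
}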
Example No. 3
/** ===========================================================================
 */
void ssa_db_lft_handle(void)
{
	struct ssa_db_lft_change_rec *p_lft_change_rec;
	cl_list_item_t *p_item;

	pthread_mutex_lock(&ssa_db->lft_rec_list_lock);

	while ((p_item = cl_qlist_remove_head(&ssa_db->lft_rec_list)) !=
	       cl_qlist_end(&ssa_db->lft_rec_list)) {
		p_lft_change_rec =
		    cl_item_obj(p_item, p_lft_change_rec, list_item);
		switch (p_lft_change_rec->lft_change.flags) {
		case LFT_CHANGED_BLOCK:
			lft_block_handle(p_lft_change_rec);
			break;
		case LFT_CHANGED_LFT_TOP:
			lft_top_handle(p_lft_change_rec);
			break;
		default:
			ssa_log(SSA_LOG_ALL, "Unknown LFT change event (%d)\n",
				p_lft_change_rec->lft_change.flags);
			break;
		}
		free(p_lft_change_rec);
	}

	pthread_mutex_unlock(&ssa_db->lft_rec_list_lock);
}
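The pattern above - a record that embeds a cl_list_item_t, is queued with cl_qlist_insert_tail() by a producer, and is recovered with cl_item_obj() by the consumer - recurs throughout these examples. A minimal, self-contained sketch of the idiom; work_item_t and work_queue are illustrative names:

#include <stdlib.h>
#include <complib/cl_qlist.h>

/* Illustrative record type with an embedded list link, like
 * struct ssa_db_lft_change_rec above. */
typedef struct work_item {
	cl_list_item_t list_item;
	int value;
} work_item_t;

static void work_queue_example(void)
{
	cl_qlist_t work_queue;
	work_item_t *p_work;
	cl_list_item_t *p_item;

	cl_qlist_init(&work_queue);

	/* producer side: allocate a record and link it in by its embedded item */
	p_work = malloc(sizeof(*p_work));
	if (!p_work)
		return;
	p_work->value = 1;
	cl_qlist_insert_tail(&work_queue, &p_work->list_item);

	/* consumer side: pop until the list's end sentinel is returned */
	while ((p_item = cl_qlist_remove_head(&work_queue)) !=
	       cl_qlist_end(&work_queue)) {
		p_work = cl_item_obj(p_item, p_work, list_item);
		/* ... process p_work->value ... */
		free(p_work);
	}
}

Because cl_qlist_remove_head() unlinks the entry as it returns it, nothing has to be searched or unlinked separately; the examples that also keep their records in a cl_qmap (Examples No. 1 and No. 9) just remove the map item as an extra step before freeing.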
Example No. 4
void osm_vl15_destroy(IN osm_vl15_t * p_vl, IN struct osm_mad_pool *p_pool)
{
	osm_madw_t *p_madw;

	OSM_LOG_ENTER(p_vl->p_log);

	/*
	   Signal our threads that we're leaving.
	 */
	p_vl->thread_state = OSM_THREAD_STATE_EXIT;

	/*
	   Don't trigger unless event has been initialized.
	   Destroy the thread before we tear down the other objects.
	 */
	if (p_vl->state != OSM_VL15_STATE_INIT)
		cl_event_signal(&p_vl->signal);

	cl_thread_destroy(&p_vl->poller);

	/*
	   Return the outstanding messages to the pool
	 */

	cl_spinlock_acquire(&p_vl->lock);

	while (!cl_is_qlist_empty(&p_vl->rfifo)) {
		p_madw = (osm_madw_t *) cl_qlist_remove_head(&p_vl->rfifo);
		osm_mad_pool_put(p_pool, p_madw);
	}
	while (!cl_is_qlist_empty(&p_vl->ufifo)) {
		p_madw = (osm_madw_t *) cl_qlist_remove_head(&p_vl->ufifo);
		osm_mad_pool_put(p_pool, p_madw);
	}

	cl_spinlock_release(&p_vl->lock);

	cl_event_destroy(&p_vl->signal);
	p_vl->state = OSM_VL15_STATE_INIT;
	cl_spinlock_destroy(&p_vl->lock);

	OSM_LOG_EXIT(p_vl->p_log);
}
Example No. 5
void cl_qcpool_destroy(IN cl_qcpool_t * const p_pool)
{
	/* CL_ASSERT that a non-NULL pointer was provided. */
	CL_ASSERT(p_pool);
	/* CL_ASSERT that we are in a valid state (not uninitialized memory). */
	CL_ASSERT(cl_is_state_valid(p_pool->state));

	if (p_pool->state == CL_INITIALIZED) {
		/*
		 * Assert if the user hasn't put everything back in the pool
		 * before destroying it. If they haven't, they are most likely
		 * still using memory that is about to be freed, and the
		 * destructor will not be called!
		 */
#ifdef _DEBUG_
		/* but we do not want "free" version to assert on this one */
		CL_ASSERT(cl_qcpool_count(p_pool) == p_pool->num_objects);
#endif
		/* call the user's destructor for each object in the pool */
		if (p_pool->pfn_dtor) {
			while (!cl_is_qlist_empty(&p_pool->free_list)) {
				p_pool->pfn_dtor((cl_pool_item_t *)
						 cl_qlist_remove_head(&p_pool->
								      free_list),
						 (void *)p_pool->context);
			}
		} else {
			cl_qlist_remove_all(&p_pool->free_list);
		}

		/* Free all allocated memory blocks. */
		while (!cl_is_qlist_empty(&p_pool->alloc_list))
			free(cl_qlist_remove_head(&p_pool->alloc_list));

		if (p_pool->component_sizes) {
			free(p_pool->component_sizes);
			p_pool->component_sizes = NULL;
		}
	}

	p_pool->state = CL_UNINITIALIZED;
}
Example No. 6
void osm_vl15_shutdown(IN osm_vl15_t * p_vl, IN osm_mad_pool_t * p_mad_pool)
{
	osm_madw_t *p_madw;

	OSM_LOG_ENTER(p_vl->p_log);

	/* we should only get here after the VL15 interface has been initialized */
	CL_ASSERT(p_vl->state == OSM_VL15_STATE_READY);

	/* grab the lock on the object */
	cl_spinlock_acquire(&p_vl->lock);

	/* go over all outstanding MADs and retire their transactions */

	/* first we handle the list of response MADs */
	p_madw = (osm_madw_t *) cl_qlist_remove_head(&p_vl->ufifo);
	while (p_madw != (osm_madw_t *) cl_qlist_end(&p_vl->ufifo)) {
		OSM_LOG(p_vl->p_log, OSM_LOG_DEBUG,
			"Releasing Response p_madw = %p\n", p_madw);

		osm_mad_pool_put(p_mad_pool, p_madw);

		p_madw = (osm_madw_t *) cl_qlist_remove_head(&p_vl->ufifo);
	}

	/* Request MADs we send out */
	p_madw = (osm_madw_t *) cl_qlist_remove_head(&p_vl->rfifo);
	while (p_madw != (osm_madw_t *) cl_qlist_end(&p_vl->rfifo)) {
		OSM_LOG(p_vl->p_log, OSM_LOG_DEBUG,
			"Releasing Request p_madw = %p\n", p_madw);

		osm_mad_pool_put(p_mad_pool, p_madw);
		osm_stats_dec_qp0_outstanding(p_vl->p_stats);

		p_madw = (osm_madw_t *) cl_qlist_remove_head(&p_vl->rfifo);
	}

	/* release the lock */
	cl_spinlock_release(&p_vl->lock);

	OSM_LOG_EXIT(p_vl->p_log);
}
Example No. 7
void osm_vendor_delete(IN osm_vendor_t ** const pp_vend)
{
	cl_list_item_t *p_item;
	cl_list_obj_t *p_obj;
	osm_bind_handle_t bind_h;
	osm_log_t *p_log;

	OSM_LOG_ENTER((*pp_vend)->p_log);
	p_log = (*pp_vend)->p_log;

	/* go over the bind handles, unbind them and remove them from the list */
	p_item = cl_qlist_remove_head(&((*pp_vend)->bind_handles));
	while (p_item != cl_qlist_end(&((*pp_vend)->bind_handles))) {

		p_obj = PARENT_STRUCT(p_item, cl_list_obj_t, list_item);
		bind_h = (osm_bind_handle_t *) cl_qlist_obj(p_obj);
		osm_log(p_log, OSM_LOG_DEBUG,
			"osm_vendor_delete: unbinding bind_h:%p \n", bind_h);

		__osm_vendor_internal_unbind(bind_h);

		free(p_obj);
		/* remove the next handle from the list */
		p_item = cl_qlist_remove_head(&((*pp_vend)->bind_handles));
	}

	if (NULL != ((*pp_vend)->p_transport_info)) {
		free((*pp_vend)->p_transport_info);
		(*pp_vend)->p_transport_info = NULL;
	}

	/* remove the packet randomizer object */
	if ((*pp_vend)->run_randomizer == TRUE)
		osm_pkt_randomizer_destroy(&((*pp_vend)->p_pkt_randomizer),
					   p_log);

	free(*pp_vend);
	*pp_vend = NULL;

	OSM_LOG_EXIT(p_log);
}
Example No. 8
void cl_disp_shutdown(IN cl_dispatcher_t * const p_disp)
{
	CL_ASSERT(p_disp);

	/* Stop the thread pool. */
	cl_thread_pool_destroy(&p_disp->worker_threads);

	/* Process all outstanding callbacks. */
	__cl_disp_worker(p_disp);

	/* Free all registration info. */
	while (!cl_is_qlist_empty(&p_disp->reg_list))
		free(cl_qlist_remove_head(&p_disp->reg_list));
}
Example No. 9
void cl_event_wheel_destroy(IN cl_event_wheel_t * const p_event_wheel)
{
	cl_list_item_t *p_list_item;
	cl_map_item_t *p_map_item;
	cl_event_wheel_reg_info_t *p_event;

	/* we need to get a lock */
	cl_spinlock_acquire(&p_event_wheel->lock);

	cl_event_wheel_dump(p_event_wheel);

	/* go over all the items in the list and remove them */
	p_list_item = cl_qlist_remove_head(&p_event_wheel->events_wheel);
	while (p_list_item != cl_qlist_end(&p_event_wheel->events_wheel)) {
		p_event =
		    PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t,
				  list_item);

		CL_DBG("cl_event_wheel_destroy: Found outstanding event"
		       " key:<0x%" PRIx64 ">\n", p_event->key);

		/* remove it from the map */
		p_map_item = &(p_event->map_item);
		cl_qmap_remove_item(&p_event_wheel->events_map, p_map_item);
		free(p_event);	/* allocated by cl_event_wheel_reg */
		p_list_item =
		    cl_qlist_remove_head(&p_event_wheel->events_wheel);
	}

	/* destroy the timer */
	cl_timer_destroy(&p_event_wheel->timer);

	/* destroy the lock (this should be done without releasing it - we don't
	   want any other thread to grab the lock at this point). */
	cl_spinlock_release(&p_event_wheel->lock);
	cl_spinlock_destroy(&(p_event_wheel->lock));
}
Example No. 10
/********************************************************************
   __cl_disp_worker

   Description:
   This function takes messages off the FIFO and invokes each message's
   registered receive callback. It executes at passive level.

   Inputs:
   p_disp - Pointer to Dispatcher object

   Outputs:
   None

   Returns:
   None
********************************************************************/
void __cl_disp_worker(IN void *context)
{
	cl_disp_msg_t *p_msg;
	cl_dispatcher_t *p_disp = (cl_dispatcher_t *) context;

	cl_spinlock_acquire(&p_disp->lock);

	/* Process the FIFO until we drain it dry. */
	while (cl_qlist_count(&p_disp->msg_fifo)) {
		/* Pop the message at the head from the FIFO. */
		p_msg =
		    (cl_disp_msg_t *) cl_qlist_remove_head(&p_disp->msg_fifo);

		/* we track the time the last message spent in the queue */
		p_disp->last_msg_queue_time_us =
		    cl_get_time_stamp() - p_msg->in_time;

		/*
		 * Release the spinlock while the message is processed.
		 * The user's callback may reenter the dispatcher
		 * and cause the lock to be reacquired.
		 */
		cl_spinlock_release(&p_disp->lock);
		p_msg->p_dest_reg->pfn_rcv_callback((void *)p_msg->p_dest_reg->
						    context,
						    (void *)p_msg->p_data);

		cl_atomic_dec(&p_msg->p_dest_reg->ref_cnt);

		/* The client has seen the data.  Notify the sender as appropriate. */
		if (p_msg->pfn_xmt_callback) {
			p_msg->pfn_xmt_callback((void *)p_msg->context,
						(void *)p_msg->p_data);
			cl_atomic_dec(&p_msg->p_src_reg->ref_cnt);
		}

		/* Grab the lock for the next iteration through the list. */
		cl_spinlock_acquire(&p_disp->lock);

		/* Return this message to the pool. */
		cl_qpool_put(&p_disp->msg_pool, (cl_pool_item_t *) p_msg);
	}

	cl_spinlock_release(&p_disp->lock);
}
Example No. 11
void osm_sa_respond(osm_sa_t *sa, osm_madw_t *madw, size_t attr_size,
		    cl_qlist_t *list)
{
	struct item_data {
		cl_list_item_t list;
		char data[0];
	};
	cl_list_item_t *item;
	osm_madw_t *resp_madw;
	ib_sa_mad_t *sa_mad, *resp_sa_mad;
	unsigned num_rec, i;
#ifndef VENDOR_RMPP_SUPPORT
	unsigned trim_num_rec;
#endif
	unsigned char *p;

	sa_mad = osm_madw_get_sa_mad_ptr(madw);
	num_rec = cl_qlist_count(list);

	/*
	 * C15-0.1.30:
	 * If we do a SubnAdmGet and got more than one record it is an error!
	 */
	if (sa_mad->method == IB_MAD_METHOD_GET && num_rec > 1) {
		OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4C05: "
			"Got %u records for SubnAdmGet(%s) comp_mask 0x%016" PRIx64 "\n",
			num_rec, ib_get_sa_attr_str(sa_mad->attr_id),
			cl_ntoh64(sa_mad->comp_mask));
		osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_TOO_MANY_RECORDS);
		goto Exit;
	}

#ifndef VENDOR_RMPP_SUPPORT
	trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / attr_size;
	if (trim_num_rec < num_rec) {
		OSM_LOG(sa->p_log, OSM_LOG_VERBOSE,
			"Number of records:%u trimmed to:%u to fit in one MAD\n",
			num_rec, trim_num_rec);
		num_rec = trim_num_rec;
	}
#endif

	OSM_LOG(sa->p_log, OSM_LOG_DEBUG, "Returning %u records\n", num_rec);

	if (sa_mad->method == IB_MAD_METHOD_GET && num_rec == 0) {
		osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_NO_RECORDS);
		goto Exit;
	}

	/*
	 * Get a MAD to reply. Address of Mad is in the received mad_wrapper
	 */
	resp_madw = osm_mad_pool_get(sa->p_mad_pool, madw->h_bind,
				     num_rec * attr_size + IB_SA_MAD_HDR_SIZE,
				     &madw->mad_addr);
	if (!resp_madw) {
		OSM_LOG(sa->p_log, OSM_LOG_ERROR, "ERR 4C06: "
			"osm_mad_pool_get failed\n");
		osm_sa_send_error(sa, madw, IB_SA_MAD_STATUS_NO_RESOURCES);
		goto Exit;
	}

	resp_sa_mad = osm_madw_get_sa_mad_ptr(resp_madw);

	/*
	   Copy the MAD header back into the response MAD,
	   set the 'R' bit and the payload length,
	   then copy all records from the list into the response payload.
	 */

	memcpy(resp_sa_mad, sa_mad, IB_SA_MAD_HDR_SIZE);
	if (resp_sa_mad->method == IB_MAD_METHOD_SET)
		resp_sa_mad->method = IB_MAD_METHOD_GET;
	resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
	/* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */
	resp_sa_mad->sm_key = 0;

	/* Fill in the offset (paylen will be done by the rmpp SAR) */
	resp_sa_mad->attr_offset = num_rec ? ib_get_attr_offset(attr_size) : 0;

	p = ib_sa_mad_get_payload_ptr(resp_sa_mad);

#ifndef VENDOR_RMPP_SUPPORT
	/* we support only single-packet RMPP - so we set the FIRST and
	   LAST flags for GetTableResp */
	if (resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) {
		resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA;
		resp_sa_mad->rmpp_flags =
		    IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST |
		    IB_RMPP_FLAG_ACTIVE;
	}
#else
	/* forcefully define the packet as RMPP one */
	if (resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP)
		resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
#endif

	for (i = 0; i < num_rec; i++) {
		item = cl_qlist_remove_head(list);
		memcpy(p, ((struct item_data *)item)->data, attr_size);
		p += attr_size;
		free(item);
	}

	osm_dump_sa_mad(sa->p_log, resp_sa_mad, OSM_LOG_FRAMES);
	osm_sa_send(sa, resp_madw, FALSE);

Exit:
	/* free any items remaining on the list */
	item = cl_qlist_remove_head(list);
	while (item != cl_qlist_end(list)) {
		free(item);
		item = cl_qlist_remove_head(list);
	}
}
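osm_sa_respond() expects every list entry to be a cl_list_item_t immediately followed by the raw record bytes (the item_data layout above), and it frees each entry itself. A hedged sketch of how a handler might queue one record before calling it; sa_item and queue_one_record are illustrative names, not part of the OpenSM API:

#include <stdlib.h>
#include <string.h>
#include <complib/cl_qlist.h>

/* Illustrative entry layout mirroring struct item_data in osm_sa_respond(). */
struct sa_item {
	cl_list_item_t list;
	char data[0];
};

static void queue_one_record(cl_qlist_t * p_list, const void *p_rec,
			     size_t attr_size)
{
	struct sa_item *p_item = malloc(sizeof(*p_item) + attr_size);

	if (!p_item)
		return;
	/* copy the record right behind the link so the consumer's memcpy
	 * from ->data picks it up unchanged */
	memcpy(p_item->data, p_rec, attr_size);
	cl_qlist_insert_tail(p_list, &p_item->list);
}

Since osm_sa_respond() drains and frees whatever is left on the list in its Exit path, the caller only allocates and never has to clean up after the call.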
Example No. 12
/* rank the switches in the subnet by BFS (root switches have rank 0) */
static int updn_subn_rank(IN updn_t * p_updn)
{
	osm_switch_t *p_sw;
	osm_physp_t *p_physp, *p_remote_physp;
	cl_qlist_t list;
	cl_map_item_t *item;
	struct updn_node *u, *remote_u;
	uint8_t num_ports, port_num;
	osm_log_t *p_log = &p_updn->p_osm->log;
	unsigned max_rank = 0;

	OSM_LOG_ENTER(p_log);
	cl_qlist_init(&list);

	/* add all roots to the list */
	for (item = cl_qmap_head(&p_updn->p_osm->subn.sw_guid_tbl);
	     item != cl_qmap_end(&p_updn->p_osm->subn.sw_guid_tbl);
	     item = cl_qmap_next(item)) {
		p_sw = (osm_switch_t *)item;
		u = p_sw->priv;
		if (!u->rank)
			cl_qlist_insert_tail(&list, &u->list);
	}

	/* BFS the list till it's empty */
	while (!cl_is_qlist_empty(&list)) {
		u = (struct updn_node *)cl_qlist_remove_head(&list);
		/* Go over all remote nodes and rank them (if not already visited) */
		p_sw = u->sw;
		num_ports = p_sw->num_ports;
		OSM_LOG(p_log, OSM_LOG_DEBUG,
			"Handling switch GUID 0x%" PRIx64 "\n",
			cl_ntoh64(osm_node_get_node_guid(p_sw->p_node)));
		for (port_num = 1; port_num < num_ports; port_num++) {
			ib_net64_t port_guid;

			/* Current port fetched in order to get remote side */
			p_physp =
			    osm_node_get_physp_ptr(p_sw->p_node, port_num);

			if (!p_physp)
				continue;

			p_remote_physp = p_physp->p_remote_physp;

			/*
			   make sure that all the following occur on p_remote_physp:
			   1. The port isn't NULL
			   2. It is a switch
			 */
			if (p_remote_physp && p_remote_physp->p_node->sw) {
				remote_u = p_remote_physp->p_node->sw->priv;
				port_guid = p_remote_physp->port_guid;

				if (remote_u->rank > u->rank + 1) {
					remote_u->rank = u->rank + 1;
					max_rank = remote_u->rank;
					cl_qlist_insert_tail(&list,
							     &remote_u->list);
					OSM_LOG(p_log, OSM_LOG_DEBUG,
						"Rank of port GUID 0x%" PRIx64
						" = %u\n", cl_ntoh64(port_guid),
						remote_u->rank);
				}
			}
		}
	}

	/* Print Summary of ranking */
	OSM_LOG(p_log, OSM_LOG_VERBOSE,
		"Subnet ranking completed. Max Node Rank = %d\n", max_rank);
	OSM_LOG_EXIT(p_log);
	return 0;
}
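The ranking above is a plain BFS that uses a cl_qlist as its work queue. A generic sketch of that idiom with the same relaxation step; bfs_node is illustrative, and - as with struct updn_node - every non-root rank is assumed to start at a large sentinel value, while the embedded cl_list_item_t sits first in the struct so the cast on removal is valid:

#include <complib/cl_qlist.h>

/* Illustrative BFS node; the embedded link must be the first member. */
struct bfs_node {
	cl_list_item_t list;
	unsigned rank;		/* non-roots pre-set to a large sentinel */
	unsigned num_nbrs;
	struct bfs_node **nbrs;
};

static void bfs_rank(struct bfs_node *root)
{
	cl_qlist_t queue;
	struct bfs_node *u;
	unsigned i;

	cl_qlist_init(&queue);
	root->rank = 0;
	cl_qlist_insert_tail(&queue, &root->list);

	/* BFS the queue till it's empty, exactly as updn_subn_rank does */
	while (!cl_is_qlist_empty(&queue)) {
		u = (struct bfs_node *) cl_qlist_remove_head(&queue);
		for (i = 0; i < u->num_nbrs; i++) {
			struct bfs_node *v = u->nbrs[i];

			if (v->rank > u->rank + 1) {
				v->rank = u->rank + 1;
				cl_qlist_insert_tail(&queue, &v->list);
			}
		}
	}
}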
Example No. 13
/**********************************************************************
 * This function does the BFS for the min hop table calculation,
 * starting from the given switch.
 **********************************************************************/
static int updn_bfs_by_node(IN osm_log_t * p_log, IN osm_subn_t * p_subn,
			    IN osm_switch_t * p_sw)
{
	uint8_t pn, pn_rem;
	cl_qlist_t list;
	uint16_t lid;
	struct updn_node *u;
	updn_switch_dir_t next_dir, current_dir;

	OSM_LOG_ENTER(p_log);

	lid = osm_node_get_base_lid(p_sw->p_node, 0);
	lid = cl_ntoh16(lid);
	osm_switch_set_hops(p_sw, lid, 0, 0);

	OSM_LOG(p_log, OSM_LOG_DEBUG,
		"Starting from switch - port GUID 0x%" PRIx64 " lid %u\n",
		cl_ntoh64(p_sw->p_node->node_info.port_guid), lid);

	u = p_sw->priv;
	u->dir = UP;

	/* Update list with the new element */
	cl_qlist_init(&list);
	cl_qlist_insert_tail(&list, &u->list);

	/* BFS the list till no next element */
	while (!cl_is_qlist_empty(&list)) {
		u = (struct updn_node *)cl_qlist_remove_head(&list);
		u->visited = 0;	/* cleanup */
		current_dir = u->dir;
		/* Go over all ports of the switch and find unvisited remote nodes */
		for (pn = 1; pn < u->sw->num_ports; pn++) {
			osm_node_t *p_remote_node;
			struct updn_node *rem_u;
			uint8_t current_min_hop, remote_min_hop,
			    set_hop_return_value;
			osm_switch_t *p_remote_sw;

			p_remote_node =
			    osm_node_get_remote_node(u->sw->p_node, pn,
						     &pn_rem);
			/* If no remote node OR remote node is not a SWITCH
			   continue to next pn */
			if (!p_remote_node || !p_remote_node->sw)
				continue;
			/* Fetch remote guid only after validation of remote node */
			p_remote_sw = p_remote_node->sw;
			rem_u = p_remote_sw->priv;
			/* Decide which direction to mark it (UP/DOWN) */
			next_dir = updn_get_dir(u->rank, rem_u->rank,
						u->id, rem_u->id);

			/* Check if this is a legal step: the only illegal step is going
			   from DOWN to UP */
			if ((current_dir == DOWN) && (next_dir == UP)) {
				OSM_LOG(p_log, OSM_LOG_DEBUG,
					"Avoiding move from 0x%016" PRIx64
					" to 0x%016" PRIx64 "\n",
					cl_ntoh64(osm_node_get_node_guid(u->sw->p_node)),
					cl_ntoh64(osm_node_get_node_guid(p_remote_node)));
				/* Illegal step */
				continue;
			}
			/* Set MinHop value for the current lid */
			current_min_hop = osm_switch_get_least_hops(u->sw, lid);
			/* Check the hop count; if better, update the remote node's
			   Min Hop Table and insert it into the list */
			remote_min_hop =
			    osm_switch_get_hop_count(p_remote_sw, lid, pn_rem);
			if (current_min_hop + 1 < remote_min_hop) {
				set_hop_return_value =
				    osm_switch_set_hops(p_remote_sw, lid,
							pn_rem,
							current_min_hop + 1);
				if (set_hop_return_value) {
					OSM_LOG(p_log, OSM_LOG_ERROR, "ERR AA01: "
						"Invalid value returned from set min hop is: %d\n",
						set_hop_return_value);
				}
				/* Check if remote port has already been visited */
				if (!rem_u->visited) {
					/* Insert updn_switch item into the list */
					rem_u->dir = next_dir;
					rem_u->visited = 1;
					cl_qlist_insert_tail(&list,
							     &rem_u->list);
				}
			}
		}
	}

	OSM_LOG_EXIT(p_log);
	return 0;
}
Example No. 14
int osm_qos_setup(osm_opensm_t * p_osm)
{
	struct qos_config ca_config, sw0_config, swe_config, rtr_config;
	struct qos_config *cfg;
	cl_qmap_t *p_tbl;
	cl_map_item_t *p_next;
	osm_port_t *p_port;
	osm_node_t *p_node;
	int ret = 0;
	int vlarb_only;
	qos_mad_list_t *p_list, *p_list_next;
	qos_mad_item_t *p_port_mad;
	cl_qlist_t qos_mad_list;

	if (!p_osm->subn.opt.qos)
		return 0;

	OSM_LOG_ENTER(&p_osm->log);

	qos_build_config(&ca_config, &p_osm->subn.opt.qos_ca_options,
			 &p_osm->subn.opt.qos_options);
	qos_build_config(&sw0_config, &p_osm->subn.opt.qos_sw0_options,
			 &p_osm->subn.opt.qos_options);
	qos_build_config(&swe_config, &p_osm->subn.opt.qos_swe_options,
			 &p_osm->subn.opt.qos_options);
	qos_build_config(&rtr_config, &p_osm->subn.opt.qos_rtr_options,
			 &p_osm->subn.opt.qos_options);

	cl_qlist_init(&qos_mad_list);

	cl_plock_excl_acquire(&p_osm->lock);

	/* read QoS policy config file */
	osm_qos_parse_policy_file(&p_osm->subn);
	p_tbl = &p_osm->subn.port_guid_tbl;
	p_next = cl_qmap_head(p_tbl);
	while (p_next != cl_qmap_end(p_tbl)) {
		vlarb_only = 0;
		p_port = (osm_port_t *) p_next;
		p_next = cl_qmap_next(p_next);

		p_list = (qos_mad_list_t *) malloc(sizeof(*p_list));
		if (!p_list) {
			/* release the subnet lock before bailing out */
			cl_plock_release(&p_osm->lock);
			OSM_LOG_EXIT(&p_osm->log);
			return -1;
		}

		memset(p_list, 0, sizeof(*p_list));

		cl_qlist_init(&p_list->port_mad_list);

		p_node = p_port->p_node;
		if (p_node->sw) {
			if (qos_extports_setup(&p_osm->sm, p_node, &swe_config,
					       &p_list->port_mad_list))
				ret = -1;

			/* skip base port 0 */
			if (!ib_switch_info_is_enhanced_port0
			    (&p_node->sw->switch_info))
				goto Continue;

			if (ib_switch_info_get_opt_sl2vlmapping(&p_node->sw->switch_info) &&
			    p_osm->sm.p_subn->opt.use_optimized_slvl &&
			    !memcmp(&swe_config.sl2vl, &sw0_config.sl2vl,
				    sizeof(swe_config.sl2vl)))
				vlarb_only = 1;

			cfg = &sw0_config;
		} else if (osm_node_get_type(p_node) == IB_NODE_TYPE_ROUTER)
			cfg = &rtr_config;
		else
			cfg = &ca_config;

		if (qos_endport_setup(&p_osm->sm, p_port->p_physp, cfg,
				      vlarb_only, &p_list->port_mad_list))
			ret = -1;
Continue:
		/* if MAD list is not empty, add it to the global MAD list */
		if (cl_qlist_count(&p_list->port_mad_list)) {
			cl_qlist_insert_tail(&qos_mad_list, &p_list->list_item);
		} else {
			free(p_list);
		}
	}
	while (cl_qlist_count(&qos_mad_list)) {
		p_list_next = (qos_mad_list_t *) cl_qlist_head(&qos_mad_list);
		while (p_list_next !=
			(qos_mad_list_t *) cl_qlist_end(&qos_mad_list)) {
			p_list = p_list_next;
			p_list_next = (qos_mad_list_t *)
				      cl_qlist_next(&p_list->list_item);
			/* next MAD to send */
			p_port_mad = (qos_mad_item_t *)
				     cl_qlist_remove_head(&p_list->port_mad_list);
			osm_send_req_mad(&p_osm->sm, p_port_mad->p_madw);
			osm_qos_mad_delete(&p_port_mad);
			/* remove the QoS MAD from global MAD list */
			if (cl_qlist_count(&p_list->port_mad_list) == 0) {
				cl_qlist_remove_item(&qos_mad_list, &p_list->list_item);
				free(p_list);
			}
		}
	}

	cl_plock_release(&p_osm->lock);
	OSM_LOG_EXIT(&p_osm->log);

	return ret;
}
Example No. 15
static void vl15_poller(IN void *p_ptr)
{
	ib_api_status_t status;
	osm_madw_t *p_madw;
	osm_vl15_t *p_vl = p_ptr;
	cl_qlist_t *p_fifo;
	int32_t max_smps = p_vl->max_wire_smps;
	int32_t max_smps2 = p_vl->max_wire_smps2;

	OSM_LOG_ENTER(p_vl->p_log);

	if (p_vl->thread_state == OSM_THREAD_STATE_NONE)
		p_vl->thread_state = OSM_THREAD_STATE_RUN;

	while (p_vl->thread_state == OSM_THREAD_STATE_RUN) {
		/*
		   Start servicing the FIFOs by pulling off MAD wrappers
		   and passing them to the transport interface.
		   There are lots of corner cases here so tread carefully.

		   The unicast FIFO has priority, since somebody is waiting
		   for a timely response.
		 */
		cl_spinlock_acquire(&p_vl->lock);

		if (cl_qlist_count(&p_vl->ufifo) != 0)
			p_fifo = &p_vl->ufifo;
		else
			p_fifo = &p_vl->rfifo;

		p_madw = (osm_madw_t *) cl_qlist_remove_head(p_fifo);

		cl_spinlock_release(&p_vl->lock);

		if (p_madw != (osm_madw_t *) cl_qlist_end(p_fifo)) {
			OSM_LOG(p_vl->p_log, OSM_LOG_DEBUG,
				"Servicing p_madw = %p\n", p_madw);
			if (osm_log_is_active(p_vl->p_log, OSM_LOG_FRAMES))
				osm_dump_dr_smp(p_vl->p_log,
						osm_madw_get_smp_ptr(p_madw),
						OSM_LOG_FRAMES);

			vl15_send_mad(p_vl, p_madw);
		} else
			/*
			   The VL15 FIFO is empty, so we have nothing left to do.
			 */
			status = cl_event_wait_on(&p_vl->signal,
						  EVENT_NO_TIMEOUT, TRUE);

		while (p_vl->p_stats->qp0_mads_outstanding_on_wire >= max_smps &&
		       p_vl->thread_state == OSM_THREAD_STATE_RUN) {
			status = cl_event_wait_on(&p_vl->signal,
						  p_vl->max_smps_timeout,
						  TRUE);
			if (status == CL_TIMEOUT) {
				if (max_smps < max_smps2)
					max_smps++;
				break;
			} else if (status != CL_SUCCESS) {
				OSM_LOG(p_vl->p_log, OSM_LOG_ERROR, "ERR 3E02: "
					"Event wait failed (%s)\n",
					CL_STATUS_MSG(status));
				break;
			}
			max_smps = p_vl->max_wire_smps;
		}
	}

	/*
	   Since we exit the loop as soon as thread_state != OSM_THREAD_STATE_RUN,
	   we might still have some MADs on the queues. After the thread exits,
	   the vl15 destroy routine should put these MADs back into the pool.
	 */

	OSM_LOG_EXIT(p_vl->p_log);
}
Example No. 16
static void __cl_event_wheel_callback(IN void *context)
{
	cl_event_wheel_t *p_event_wheel = (cl_event_wheel_t *) context;
	cl_list_item_t *p_list_item, *p_prev_event_list_item;
	cl_list_item_t *p_list_next_item;
	cl_event_wheel_reg_info_t *p_event;
	uint64_t current_time;
	uint64_t next_aging_time;
	uint32_t new_timeout;
	cl_status_t cl_status;

	/* we may be in the middle of closing ... */
	if (p_event_wheel->closing)
		return;

	current_time = cl_get_time_stamp();

	if (NULL != p_event_wheel->p_external_lock)

		/* Take care of the order of acquiring locks to avoid the deadlock!
		 * The external lock goes first.
		 */
		cl_spinlock_acquire(p_event_wheel->p_external_lock);

	cl_spinlock_acquire(&p_event_wheel->lock);

	p_list_item = cl_qlist_head(&p_event_wheel->events_wheel);
	if (p_list_item == cl_qlist_end(&p_event_wheel->events_wheel))
		/* the list is empty - nothing to do */
		goto Exit;

	/* we found such an item.  get the p_event */
	p_event =
	    PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item);

	while (p_event->aging_time <= current_time) {
		/* this object has aged - invoke its callback */
		if (p_event->pfn_aged_callback)
			next_aging_time =
			    p_event->pfn_aged_callback(p_event->key,
						       p_event->num_regs,
						       p_event->context);
		else
			next_aging_time = 0;

		/* point to the next object in the wheel */
		p_list_next_item = cl_qlist_next(p_list_item);

		/* We need to retire the event if the next aging time passed */
		if (next_aging_time < current_time) {
			/* remove it from the map */
			cl_qmap_remove_item(&p_event_wheel->events_map,
					    &(p_event->map_item));

			/* pop p_event from the wheel */
			cl_qlist_remove_head(&p_event_wheel->events_wheel);

			/* delete the event info object - allocated by cl_event_wheel_reg */
			free(p_event);
		} else {
			/* update the required aging time */
			p_event->aging_time = next_aging_time;
			p_event->num_regs++;

			/* do not remove from the map  - but remove from the list head and
			   place in the correct position */

			/* pop p_event from the wheel */
			cl_qlist_remove_head(&p_event_wheel->events_wheel);

			/* find the event that ages just before */
			p_prev_event_list_item =
			    cl_qlist_find_from_tail(&p_event_wheel->
						    events_wheel,
						    __event_will_age_before,
						    &p_event->aging_time);

			/* insert just after */
			cl_qlist_insert_next(&p_event_wheel->events_wheel,
					     p_prev_event_list_item,
					     &p_event->list_item);

			/* as we have modified the list - restart from first item: */
			p_list_next_item =
			    cl_qlist_head(&p_event_wheel->events_wheel);
		}

		/* advance to next event */
		p_list_item = p_list_next_item;
		if (p_list_item == cl_qlist_end(&p_event_wheel->events_wheel))
			/* we reached the end of the list - nothing more to do */
			break;

		/* get the p_event */
		p_event =
		    PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t,
				  list_item);
	}

	/* We need to restart the timer only if the list is not empty now */
	if (p_list_item != cl_qlist_end(&p_event_wheel->events_wheel)) {
		/* get the p_event */
		p_event =
		    PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t,
				  list_item);

		/* start the timer to the timeout [msec] */
		new_timeout =
		    (uint32_t) ((p_event->aging_time - current_time + 500) / 1000);
		CL_DBG("__cl_event_wheel_callback: Restart timer in: "
		       "%u [msec]\n", new_timeout);
		cl_status = cl_timer_start(&p_event_wheel->timer, new_timeout);
		if (cl_status != CL_SUCCESS) {
			CL_DBG("__cl_event_wheel_callback : ERR 6200: "
			       "Failed to start timer\n");
		}
	}

	/* release the lock */
Exit:
	cl_spinlock_release(&p_event_wheel->lock);
	if (NULL != p_event_wheel->p_external_lock)
		cl_spinlock_release(p_event_wheel->p_external_lock);
}