Example #1
void cl_qcpool_destroy(IN cl_qcpool_t * const p_pool)
{
	/* CL_ASSERT that a non-NULL pointer was provided. */
	CL_ASSERT(p_pool);
	/* CL_ASSERT that we are in a valid state (not uninitialized memory). */
	CL_ASSERT(cl_is_state_valid(p_pool->state));

	if (p_pool->state == CL_INITIALIZED) {
		/*
		 * Assert if the user hasn't put everything back in the pool
		 * before destroying it.  If objects are still outstanding, the
		 * user is most likely still using memory that is about to be
		 * freed, and the destructor will not be called for it!
		 */
#ifdef _DEBUG_
		/* but we do not want "free" version to assert on this one */
		CL_ASSERT(cl_qcpool_count(p_pool) == p_pool->num_objects);
#endif
		/* call the user's destructor for each object in the pool */
		if (p_pool->pfn_dtor) {
			while (!cl_is_qlist_empty(&p_pool->free_list)) {
				p_pool->pfn_dtor((cl_pool_item_t *)
						 cl_qlist_remove_head(&p_pool->
								      free_list),
						 (void *)p_pool->context);
			}
		} else {
			cl_qlist_remove_all(&p_pool->free_list);
		}

		/* Free all allocated memory blocks. */
		while (!cl_is_qlist_empty(&p_pool->alloc_list))
			free(cl_qlist_remove_head(&p_pool->alloc_list));

		if (p_pool->component_sizes) {
			free(p_pool->component_sizes);
			p_pool->component_sizes = NULL;
		}
	}

	p_pool->state = CL_UNINITIALIZED;
}
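/*
 * Hedged usage sketch (not part of the original source): the destroy path
 * above expects every object obtained from the pool to have been returned
 * first.  cl_qcpool_put() is assumed to be the usual complib routine for
 * returning a cl_pool_item_t to a quick composite pool.
 */
static void example_qcpool_teardown(IN cl_qcpool_t * const p_pool,
				    IN cl_pool_item_t * const p_outstanding_item)
{
	/* Return the outstanding item so the debug-build count assert holds. */
	cl_qcpool_put(p_pool, p_outstanding_item);

	/* Now destroy; the user's destructor runs for every pooled object. */
	cl_qcpool_destroy(p_pool);
}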
void clPyGlueInit(char* appModule, void (*init_extensions[])(void), int argc, char** argv)
{
    char buf[1024];
    ClRcT rc;

    Py_Initialize();
    PySys_SetArgv(argc, argv);
    PyEval_InitThreads();

    if (init_extensions) 
      {
        int i = 0;
        for(i=0; init_extensions[i]!=NULL; i++) (*init_extensions[i])();
      }
    thrdState = PyThreadState_Get();

    rc = clOsalMutexInit(&pyMutex);
    CL_ASSERT(rc==CL_OK); 

    rc = clOsalCondInit(&event);
    CL_ASSERT(rc==CL_OK); 

    rc = clOsalMutexLock(&pyMutex);
    CL_ASSERT(rc==CL_OK); 

    PyThreadState_Swap(thrdState);

    PyRun_SimpleString("import os, os.path, sys\n");
    snprintf(buf,1024,"sys.path.append(os.path.realpath('%s'))\n",CL_APP_BINDIR);
    clprintf(CL_LOG_SEV_INFO, "%s", buf);
    PyRun_SimpleString(buf);
    //PyRun_SimpleString("sys.path.append(os.path.realpath('../../bin'))\n");
    snprintf(buf,1024,"from %s import *\n",appModule);
    clprintf(CL_LOG_SEV_INFO, "%s", buf);
    PyRun_SimpleString(buf);

    PyThreadState_Swap(NULL);
    PyEval_ReleaseLock();

    rc=clOsalMutexUnlock(&pyMutex);
    CL_ASSERT(rc==CL_OK); 

}
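/*
 * Hedged usage sketch (not from the original file): handing clPyGlueInit a
 * NULL-terminated table of Python extension initializers.  initClustExt and
 * "myAppModule" are hypothetical names used only for illustration.
 */
extern void initClustExt(void);   /* hypothetical Python C extension init */

void examplePyGlueBoot(int argc, char** argv)
{
    void (*exts[])(void) = { initClustExt, NULL };
    clPyGlueInit("myAppModule", exts, argc, argv);
}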
Example #3
void osm_transaction_mgr_init(IN osm_vendor_t * const p_vend)
{
	cl_status_t cl_status;
	osm_transaction_mgr_t *trans_mgr_p;
	OSM_LOG_ENTER(p_vend->p_log);

	CL_ASSERT(p_vend->p_transaction_mgr == NULL);

	p_vend->p_transaction_mgr =
	    (osm_transaction_mgr_t *) malloc(sizeof(osm_transaction_mgr_t));

	trans_mgr_p = (osm_transaction_mgr_t *) p_vend->p_transaction_mgr;

	/*  construct lock object  */
	cl_spinlock_construct(&(trans_mgr_p->transaction_mgr_lock));
	cl_status = cl_spinlock_init(&(trans_mgr_p->transaction_mgr_lock));
	CL_ASSERT(cl_status == CL_SUCCESS);

	/*  initialize the qlist */
	trans_mgr_p->madw_reqs_list_p =
	    (cl_qlist_t *) malloc(sizeof(cl_qlist_t));
	cl_qlist_init(trans_mgr_p->madw_reqs_list_p);

	/*  initialize the qmap */
	trans_mgr_p->madw_by_tid_map_p =
	    (cl_qmap_t *) malloc(sizeof(cl_qmap_t));
	cl_qmap_init(trans_mgr_p->madw_by_tid_map_p);

	/*  create the timer used by the madw_req_list */
	cl_timer_construct(&(trans_mgr_p->madw_list_timer));

	/*  init the timer with timeout. */
	cl_status = cl_timer_init(&trans_mgr_p->madw_list_timer,
				  __osm_transaction_mgr_callback, p_vend);

	if (cl_status != CL_SUCCESS) {
		osm_log(p_vend->p_log, OSM_LOG_ERROR,
			"osm_transaction_mgr_init : ERROR 1000: "
			"Failed to initialize madw_reqs_list timer\n");
	}
	OSM_LOG_EXIT(p_vend->p_log);
}
/**********************************************************************
 The plock must be held before calling this function.
**********************************************************************/
static void si_rcv_get_fwd_tbl(IN osm_sm_t * sm, IN osm_switch_t * p_sw)
{
	osm_madw_context_t context;
	osm_dr_path_t *p_dr_path;
	osm_physp_t *p_physp;
	osm_node_t *p_node;
	uint32_t block_id_ho;
	uint32_t max_block_id_ho;
	ib_api_status_t status = IB_SUCCESS;

	OSM_LOG_ENTER(sm->p_log);

	CL_ASSERT(p_sw);

	p_node = p_sw->p_node;

	CL_ASSERT(osm_node_get_type(p_node) == IB_NODE_TYPE_SWITCH);

	context.lft_context.node_guid = osm_node_get_node_guid(p_node);
	context.lft_context.set_method = FALSE;

	max_block_id_ho = osm_switch_get_max_block_id_in_use(p_sw);

	p_physp = osm_node_get_physp_ptr(p_node, 0);
	p_dr_path = osm_physp_get_dr_path_ptr(p_physp);

	for (block_id_ho = 0; block_id_ho <= max_block_id_ho; block_id_ho++) {
		OSM_LOG(sm->p_log, OSM_LOG_DEBUG,
			"Retrieving FT block %u\n", block_id_ho);

		status = osm_req_get(sm, p_dr_path, IB_MAD_ATTR_LIN_FWD_TBL,
				     cl_hton32(block_id_ho),
				     CL_DISP_MSGID_NONE, &context);
		if (status != IB_SUCCESS)
			/* continue the loop despite the error */
			OSM_LOG(sm->p_log, OSM_LOG_ERROR, "ERR 3603: "
				"Failure initiating PortInfo request (%s)\n",
				ib_get_err_str(status));
	}

	OSM_LOG_EXIT(sm->p_log);
}
void osm_ucast_mgr_destroy(IN osm_ucast_mgr_t * p_mgr)
{
	CL_ASSERT(p_mgr);

	OSM_LOG_ENTER(p_mgr->p_log);

	if (p_mgr->cache_valid)
		osm_ucast_cache_invalidate(p_mgr);

	OSM_LOG_EXIT(p_mgr->p_log);
}
/*----------------------------------------------------------------------------
 *  Cluster Track Callback Handler
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterTrackCallbackHandler(
    CL_IN   ClGmsClusterTrackCallbackDataT* const res)
{
    ClRcT rc = CL_OK;
    struct gms_instance *gms_instance_ptr = NULL;
    ClGmsHandleT gmsHandle = CL_GMS_INVALID_HANDLE;

    CL_ASSERT(res != NULL);
    clLog(INFO,NA,NA,"received cluster track callback");

    gmsHandle = res->gmsHandle;
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        goto error_free_res;
    }

    if (gms_instance_ptr == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NULL_POINTER);
        goto error_free_res;
    }

    if (gms_instance_ptr->callbacks.clGmsClusterTrackCallback == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
        goto error_checkin_free_res;
    }

    /*
     * Call the user's callback function with the data.  The user must not
     * free the data we provide; if it needs to retain it, it has to copy
     * it out from what we provide here.
     */
    (*gms_instance_ptr->callbacks.clGmsClusterTrackCallback)
        (gmsHandle, &res->buffer, res->numberOfMembers, res->rc);

error_checkin_free_res:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

error_free_res:
    /* Free the data (res) whether or not we were able to call the actual callback */
    if (res->buffer.notification != NULL)
    {
        clHeapFree((void*)res->buffer.notification);
    }
    clHeapFree((void*)res);
    return rc;
}
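/*
 * Hedged sketch of an application-side track callback that honors the
 * ownership rule noted above: the notification buffer belongs to the library
 * and is freed by the handler, so anything the application wants to keep must
 * be copied out before returning.  The parameter list mirrors the invocation
 * above; the exact callback typedef is an assumption.
 */
static void *gSavedNotifications = NULL;   /* hypothetical application-side copy */

static void exampleClusterTrackCallback(ClGmsHandleT gmsHandle,
                                        ClGmsClusterNotificationBufferT *buffer,
                                        ClUint32T numberOfMembers,
                                        ClRcT rc)
{
    if (rc != CL_OK || buffer == NULL || buffer->notification == NULL)
        return;

    /* Copy out the member list; the library-owned buffer goes away on return. */
    gSavedNotifications = clHeapCalloc(numberOfMembers,
                                       sizeof(*buffer->notification));
    if (gSavedNotifications != NULL)
        memcpy(gSavedNotifications, buffer->notification,
               numberOfMembers * sizeof(*buffer->notification));
}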
Example #7
size_t
cl_ptr_vector_find_from_start(IN const cl_ptr_vector_t * const p_vector,
			      IN cl_pfn_ptr_vec_find_t pfn_callback,
			      IN const void *const context)
{
	size_t i;

	CL_ASSERT(p_vector);
	CL_ASSERT(p_vector->state == CL_INITIALIZED);
	CL_ASSERT(pfn_callback);

	for (i = 0; i < p_vector->size; i++) {
		/* Invoke the callback */
		if (pfn_callback(i, (void *)p_vector->p_ptr_array[i],
				 (void *)context) == CL_SUCCESS) {
			break;
		}
	}
	return (i);
}
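/*
 * Hedged usage sketch (not in the original source): a find callback stops the
 * scan by returning CL_SUCCESS at the matching index.  The callback shape is
 * inferred from the invocation above: (index, element, context).
 */
static cl_status_t example_match_ptr(IN const size_t index,
				     IN void *const p_element,
				     IN void *context)
{
	UNUSED_PARAM(index);
	/* Match when the element equals the pointer passed as context. */
	return (p_element == context) ? CL_SUCCESS : CL_NOT_FOUND;
}

/*
 * index = cl_ptr_vector_find_from_start(&vec, example_match_ptr, p_target);
 * A result equal to the vector's size means nothing matched.
 */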
ClRcT clTransportNotifyRegister(ClTransportNotifyCallbackT callback, ClPtrT arg)
{
    ClTransportNotifyRegistrantT *registrant = NULL;
    if(!callback) return CL_ERR_INVALID_PARAMETER;
    registrant = clHeapCalloc(1, sizeof(*registrant));
    CL_ASSERT(registrant != NULL);
    registrant->callback = callback;
    registrant->arg = arg;
    clListAddTail(&registrant->list, &gClXportNotifyRegistrants);
    return CL_OK;
}
ClRcT clMemPartInitialize(ClMemPartHandleT *pMemPartHandle, ClUint32T memPartSize)
{
    ClCharT *pool = NULL;
    ClInt32T i;
    ClRcT rc = CL_OK;
    ClMemPartT *pMemPart = NULL;

    if(!pMemPartHandle)
        return CL_ERR_INVALID_PARAMETER;

    if(!memPartSize)
        memPartSize = CL_MEM_PART_SIZE;

    pMemPart = calloc(1, sizeof(*pMemPart));
    CL_ASSERT(pMemPart !=  NULL);

    rc= clOsalMutexInit(&pMemPart->mutex);
    CL_ASSERT(rc == CL_OK);

    for(i = 0; i < CL_MEM_PART_EXPANSION_SLOTS; ++i)
        pMemPart->partitions[i] = NULL;

    pMemPart->index = 0;
    pool = malloc(memPartSize);
    if(!pool)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("malloc failed for size [%d] while trying to create MEM partition\n", 
                                        memPartSize));
        free(pMemPart); /* don't leak the partially initialized context */
        return CL_ERR_NO_MEMORY;
    }
    pMemPart->partId = memPartCreate(pool, memPartSize);
    if(!pMemPart->partId)
    {
        free(pool);
        free(pMemPart); /* don't leak the partially initialized context */
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("memPartCreate for size [%d] failed\n", memPartSize));
        return CL_ERR_NO_MEMORY;
    }
    pMemPart->partitions[pMemPart->index++] = pool;
    *pMemPartHandle = (ClMemPartHandleT)pMemPart;
    return CL_OK;
}
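/*
 * Hedged usage sketch: create a partition with the default size (passing 0
 * selects CL_MEM_PART_SIZE above) and keep the opaque handle for later use.
 */
ClRcT exampleMemPartSetup(ClMemPartHandleT *pHandle)
{
    ClRcT rc = clMemPartInitialize(pHandle, 0);
    if (rc != CL_OK)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("clMemPartInitialize failed, rc [%#x]\n", rc));
    }
    return rc;
}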
static int cycle_exists(cdg_vertex_t * start, cdg_vertex_t * current,
			cdg_vertex_t * prev, int visit_num)
{
	int i, new_visit_num;
	int cycle_found = 0;

	if (current != NULL && current->visiting_number > 0) {
		if (visit_num > current->visiting_number && current->seen == 0) {
			cycle_found = 1;
		}
	} else {
		if (current == NULL) {
			current = start;
			CL_ASSERT(prev == NULL);
		}

		current->visiting_number = visit_num;

		if (prev != NULL) {
			prev->next = current;
			CL_ASSERT(prev->to == current->from);
			CL_ASSERT(prev->visiting_number > 0);
		}

		new_visit_num = visit_num + 1;

		for (i = 0; i < current->num_deps; i++) {
			cycle_found =
			    cycle_exists(start, current->deps[i].v, current,
					 new_visit_num);
			if (cycle_found == 1)
				i = current->num_deps;
		}

		current->seen = 1;
		if (prev != NULL)
			prev->next = NULL;
	}

	return cycle_found;
}
/**
 *  Restarts the extended state machine instance.
 *
 *  Please refer to the FSM Restart API.
 *
 *  @param smThis State Machine Object
 *
 *  @returns 
 *    CL_OK on a successful restart <br/>
 *    CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance handle <br/>
 *
 *  @see #clSmInstanceStart
 */
ClRcT
clEsmInstanceRestart(ClExSmInstancePtrT smThis
                  )
{
    CL_ASSERT(smThis);
    if(smThis)
      {
        return clSmInstanceRestart(smThis->fsm);
      }
    
    return CL_SM_RC(CL_ERR_NULL_POINTER);
}
void clEoQueueStatsStop(ClUint32T queueSize, ClEoJobT *pJob, ClEoQueueStatsT *pStats)
{
    ClUint8T proto = pJob->msgParam.protoType;
    ClUint8T priority = CL_EO_RECV_QUEUE_PRI(pJob->msgParam);
    ClUint64T timeDiff=0;
    gettimeofday(&pStats->end, NULL);
    CL_ASSERT(pStats->priority == priority);
    CL_ASSERT(pStats->proto == proto);
    pStats->end.tv_sec -= pStats->start.tv_sec;
    pStats->end.tv_usec -= pStats->start.tv_usec;
    if(pStats->end.tv_sec < 0)
        pStats->end.tv_sec = 0, pStats->end.tv_usec = 0;
    else if(pStats->end.tv_usec < 0)
    {
        --pStats->end.tv_sec;
        pStats->end.tv_usec += 1000000L;
    }
    timeDiff = (ClUint64T)pStats->end.tv_sec*1000000L + pStats->end.tv_usec;
    pStats->totalTime = timeDiff;
    clEoQueueStatsUpdate(pStats);
}
Example #13
/*
  Since a race can occur on requests - meaning a response may be received
  before the send_callback is called - we save both the madw_p and whether
  or not it is a response.  A race can occur only on requests that did not
  fail, and then the madw_p will be put back in the pool before the
  callback.
*/
uint64_t __osm_set_wrid_by_p_madw(IN osm_madw_t * p_madw)
{
	uint64_t wrid = 0;

	CL_ASSERT(p_madw->p_mad);

	memcpy(&wrid, &p_madw, sizeof(osm_madw_t *));
	wrid = (wrid << 1) |
	    ib_mad_is_response(p_madw->p_mad) |
	    (p_madw->p_mad->method == IB_MAD_METHOD_TRAP_REPRESS);
	return wrid;
}
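/*
 * Hedged companion sketch (an assumption, not taken from the original file):
 * inverting the packing above to recover the wrapper pointer and the
 * response/repress flag.  Like the packing itself, this relies on the top bit
 * of the pointer value being clear so the left shift is lossless.
 */
static osm_madw_t *__example_get_madw_by_wrid(IN uint64_t wrid,
					      OUT boolean_t * p_is_resp)
{
	osm_madw_t *p_madw;

	if (p_is_resp)
		*p_is_resp = (boolean_t) (wrid & 1);	/* low bit: response/repress */

	wrid >>= 1;		/* undo the left shift */
	memcpy(&p_madw, &wrid, sizeof(p_madw));
	return p_madw;
}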
void osm_nd_rcv_process(IN void *context, IN void *data)
{
	osm_sm_t *sm = context;
	osm_madw_t *p_madw = data;
	ib_node_desc_t *p_nd;
	ib_smp_t *p_smp;
	osm_node_t *p_node;
	ib_net64_t node_guid;

	CL_ASSERT(sm);

	OSM_LOG_ENTER(sm->p_log);

	CL_ASSERT(p_madw);

	p_smp = osm_madw_get_smp_ptr(p_madw);
	if (ib_smp_get_status(p_smp)) {
		OSM_LOG(sm->p_log, OSM_LOG_DEBUG,
			"MAD status 0x%x received\n",
			cl_ntoh16(ib_smp_get_status(p_smp)));
		goto Exit;
	}

	p_nd = ib_smp_get_payload_ptr(p_smp);

	/* Acquire the node object and add the node description. */
	node_guid = osm_madw_get_nd_context_ptr(p_madw)->node_guid;
	CL_PLOCK_EXCL_ACQUIRE(sm->p_lock);
	p_node = osm_get_node_by_guid(sm->p_subn, node_guid);
	if (!p_node)
		OSM_LOG(sm->p_log, OSM_LOG_ERROR, "ERR 0B01: "
			"NodeDescription received for nonexistent node "
			"0x%" PRIx64 "\n", cl_ntoh64(node_guid));
	else
		nd_rcv_process_nd(sm, p_node, p_nd);

	CL_PLOCK_RELEASE(sm->p_lock);
Exit:
	OSM_LOG_EXIT(sm->p_log);
}
Example #15
osm_madw_t *osm_mad_pool_get(IN osm_mad_pool_t * const p_pool,
                             IN osm_bind_handle_t h_bind,
                             IN const uint32_t total_size,
                             IN const osm_mad_addr_t * const p_mad_addr)
{
    osm_madw_t *p_madw;
    ib_mad_t *p_mad;

    CL_ASSERT(h_bind != OSM_BIND_INVALID_HANDLE);
    CL_ASSERT(total_size);

    /*
       First, acquire a mad wrapper from the mad wrapper pool.
     */
    p_madw = malloc(sizeof(*p_madw));
    if (p_madw == NULL)
        goto Exit;

    osm_madw_init(p_madw, h_bind, total_size, p_mad_addr);

    /*
       Next, acquire a wire mad of the specified size.
     */
    p_mad = osm_vendor_get(h_bind, total_size, &p_madw->vend_wrap);
    if (p_mad == NULL) {
        /* Don't leak wrappers! */
        free(p_madw);
        p_madw = NULL;
        goto Exit;
    }

    cl_atomic_inc(&p_pool->mads_out);
    /*
       Finally, attach the wire MAD to this wrapper.
     */
    osm_madw_set_mad(p_madw, p_mad);

Exit:
    return p_madw;
}
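/*
 * Hedged usage sketch: pair every successful osm_mad_pool_get() with
 * osm_mad_pool_put() (the assumed companion release routine) so mads_out
 * drops back once the wrapper is no longer needed.
 */
static void example_mad_roundtrip(IN osm_mad_pool_t * const p_pool,
                                  IN osm_bind_handle_t h_bind,
                                  IN const osm_mad_addr_t * const p_addr)
{
    osm_madw_t *p_madw = osm_mad_pool_get(p_pool, h_bind, MAD_BLOCK_SIZE, p_addr);
    if (p_madw == NULL)
        return;                 /* allocation failed; nothing to release */

    /* ... build and send the MAD here ... */

    osm_mad_pool_put(p_pool, p_madw);
}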
ib_api_status_t
__osmv_txnmgr_lookup(IN osmv_txn_mgr_t * p_tx_mgr,
		     IN uint64_t key, OUT osmv_txn_ctx_t ** pp_txn)
{
	ib_api_status_t status = IB_SUCCESS;
	cl_map_item_t *p_item;
	cl_map_obj_t *p_obj;

	uint64_t tmp_key;

	CL_ASSERT(p_tx_mgr);
	CL_ASSERT(pp_txn);

	OSM_LOG_ENTER(p_tx_mgr->p_log);

	osm_log(p_tx_mgr->p_log, OSM_LOG_DEBUG,
		"__osmv_txnmgr_lookup: "
		"Looking for key: 0x%llX in map ptr:%p\n", key,
		p_tx_mgr->p_txn_map);

	p_item = cl_qmap_head(p_tx_mgr->p_txn_map);
	while (p_item != cl_qmap_end(p_tx_mgr->p_txn_map)) {
		tmp_key = cl_qmap_key(p_item);
		osm_log(p_tx_mgr->p_log, OSM_LOG_DEBUG,
			"__osmv_txnmgr_lookup: "
			"Found key 0x%llX \n", tmp_key);
		p_item = cl_qmap_next(p_item);
	}

	p_item = cl_qmap_get(p_tx_mgr->p_txn_map, key);
	if (cl_qmap_end(p_tx_mgr->p_txn_map) == p_item) {
		status = IB_NOT_FOUND;
	} else {
		p_obj = PARENT_STRUCT(p_item, cl_map_obj_t, item);
		*pp_txn = cl_qmap_obj(p_obj);
	}

	OSM_LOG_EXIT(p_tx_mgr->p_log);
	return status;
}
/* 
 * clDispatchSelectionObjectGet returns the selection object [readFd] associated
 * with this particular initialization of the dispatch library 
 */
ClRcT   clDispatchSelectionObjectGet(
        CL_IN   ClHandleT           dispatchHandle,
        CL_OUT  ClSelectionObjectT* pSelectionObject)
{
    ClRcT   rc = CL_OK;
    ClDispatchDbEntryT* thisDbEntry = NULL;

    if (pSelectionObject == NULL)
    {
        return CL_ERR_NULL_POINTER;
    }

    CHECK_LIB_INIT;

    rc = clHandleCheckout(databaseHandle, dispatchHandle, (void *)&thisDbEntry);
    if (rc != CL_OK)
    {
        return CL_ERR_INVALID_HANDLE;
    }
    CL_ASSERT(thisDbEntry != NULL);

    rc = clOsalMutexLock(thisDbEntry->dispatchMutex);
    if (rc != CL_OK)
    {
        goto error_return;
    }

    if (thisDbEntry->shouldDelete == CL_TRUE)
    {
        rc = CL_ERR_INVALID_HANDLE;
        goto error_unlock_return;
    }
    
    *pSelectionObject = (ClSelectionObjectT)thisDbEntry->readFd;

error_unlock_return:
    rc = clOsalMutexUnlock(thisDbEntry->dispatchMutex);
    if (rc != CL_OK)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR,
                ("Mutex Unlock failed with rc = 0x%x\n",rc));
    }

error_return:
    if ((clHandleCheckin(databaseHandle, dispatchHandle)) != CL_OK)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR,
                ("clHandleCheckin failed"));
    }

    return rc;
}
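/*
 * Hedged usage sketch: the selection object is a readable fd, so a caller can
 * multiplex it with plain POSIX select() and service the dispatch library once
 * it becomes readable.  The companion "dispatch pending callbacks" call is not
 * shown here.
 */
ClRcT exampleWaitOnDispatch(ClHandleT dispatchHandle)
{
    ClSelectionObjectT selObj = 0;
    fd_set readFds;
    ClRcT rc;

    rc = clDispatchSelectionObjectGet(dispatchHandle, &selObj);
    if (rc != CL_OK)
    {
        return rc;
    }

    FD_ZERO(&readFds);
    FD_SET((ClInt32T)selObj, &readFds);
    if (select((ClInt32T)selObj + 1, &readFds, NULL, NULL, NULL) > 0)
    {
        /* The dispatch fd is readable: invoke pending callbacks here. */
    }
    return CL_OK;
}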
void clPyGlueStart(int block)
{
    ClRcT rc;
    clRunPython("eoApp.Start()");

    if (block)
      {
      ClTimerTimeOutT forever={0};
      rc = clOsalMutexLock(&pyMutex);
      CL_ASSERT(rc==CL_OK); 
      clOsalCondWait (&event,&pyMutex,forever);
      }
}
Example #19
cl_status_t
cl_ptr_vector_set_capacity(IN cl_ptr_vector_t * const p_vector,
			   IN const size_t new_capacity)
{
	void *p_new_ptr_array;

	CL_ASSERT(p_vector);
	CL_ASSERT(p_vector->state == CL_INITIALIZED);

	/* Do we have to do anything here? */
	if (new_capacity <= p_vector->capacity) {
		/* Nope */
		return (CL_SUCCESS);
	}

	/* Allocate our pointer array. */
	p_new_ptr_array = malloc(new_capacity * sizeof(void *));
	if (!p_new_ptr_array)
		return (CL_INSUFFICIENT_MEMORY);
	else
		memset(p_new_ptr_array, 0, new_capacity * sizeof(void *));

	if (p_vector->p_ptr_array) {
		/* Copy the old pointer array into the new. */
		memcpy(p_new_ptr_array, p_vector->p_ptr_array,
		       p_vector->capacity * sizeof(void *));

		/* Free the old pointer array. */
		free((void *)p_vector->p_ptr_array);
	}

	/* Set the new array. */
	p_vector->p_ptr_array = p_new_ptr_array;

	/* Update the vector with the new capacity. */
	p_vector->capacity = new_capacity;

	return (CL_SUCCESS);
}
static void clAmsMgmtOIExtendedCacheAdd(ClAmsMgmtOIExtendedClassTypeT type,
                                        ClCorInstanceIdT instance, 
                                        ClAmsMgmtOIExtendedEntityConfigT *pConfig,
                                        ClUint32T configSize)
{
    struct hashStruct **table = NULL;
    ClAmsMgmtOIExtendedCacheT *cacheEntry = NULL;
    if(type >= CL_AMS_MGMT_OI_EXTENDED_CLASS_MAX || !pConfig || !configSize)
        return;
    clOsalMutexLock(&gClAmsMgmtOICacheMutex);
    table = gClAmsMgmtOIExtendedCacheTable[type];
    if( (cacheEntry = clAmsMgmtOIExtendedCacheFind(table, instance) ) )
    {
        CL_ASSERT(cacheEntry->pConfig != NULL);
        if(cacheEntry->configSize != configSize)
        {
            clHeapFree(cacheEntry->pConfig);
            cacheEntry->pConfig = clHeapCalloc(1, configSize);
            CL_ASSERT(cacheEntry->pConfig != NULL);
            cacheEntry->configSize = configSize;
        }
        memcpy(cacheEntry->pConfig, pConfig, configSize);
    }
    else
    {
        ClAmsMgmtOIExtendedEntityConfigT *pExtendedConfig = NULL;
        cacheEntry = clHeapCalloc(1, sizeof(*cacheEntry));
        CL_ASSERT(cacheEntry != NULL);
        pExtendedConfig = clHeapCalloc(1, configSize);
        CL_ASSERT(pExtendedConfig != NULL);
        cacheEntry->pConfig = pExtendedConfig;
        cacheEntry->instance = instance;
        memcpy(cacheEntry->pConfig, pConfig, configSize);
        hashAdd(table, entityCacheHashKey(instance), &cacheEntry->hash);
        clLogNotice("AMF", "MGMT", "Added entity [%s], instance [%d] to the extended class [%s]",
                    pConfig->entity.name.value, instance, gClAmsMgmtOIExtendedCacheStrTable[type]);
    }
    clOsalMutexUnlock(&gClAmsMgmtOICacheMutex);
}
osm_bind_handle_t
osm_vendor_bind(IN osm_vendor_t * const p_vend,
		IN osm_bind_info_t * const p_bind_info,
		IN osm_mad_pool_t * const p_mad_pool,
		IN osm_vend_mad_recv_callback_t mad_recv_callback,
		IN void *context)
{
	osm_bind_handle_t h_bind;

	CL_ASSERT(p_vend);
	CL_ASSERT(p_bind_info);
	CL_ASSERT(p_mad_pool);
	CL_ASSERT(mad_recv_callback);
	CL_ASSERT(context);

	OSM_LOG_ENTER(p_vend->p_log);

	UNUSED_PARAM(p_vend);
	UNUSED_PARAM(p_mad_pool);
	UNUSED_PARAM(mad_recv_callback);
	UNUSED_PARAM(context);

	h_bind = (osm_bind_handle_t) malloc(sizeof(*h_bind));
	if (h_bind != NULL) {
		memset(h_bind, 0, sizeof(*h_bind));
		h_bind->p_vend = p_vend;
		h_bind->port_guid = p_bind_info->port_guid;
		h_bind->mad_class = p_bind_info->mad_class;
		h_bind->class_version = p_bind_info->class_version;
		h_bind->is_responder = p_bind_info->is_responder;
		h_bind->is_trap_processor = p_bind_info->is_trap_processor;
		h_bind->is_report_processor = p_bind_info->is_report_processor;
		h_bind->send_q_size = p_bind_info->send_q_size;
		h_bind->recv_q_size = p_bind_info->recv_q_size;
	}

	OSM_LOG_EXIT(p_vend->p_log);
	return (h_bind);
}
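/*
 * Hedged usage sketch: filling an osm_bind_info_t with the fields the function
 * above copies, then binding.  IB_MCLASS_SUBN_ADM, the class version and the
 * queue depths are illustrative assumptions; the receive callback comes from
 * the caller.
 */
static osm_bind_handle_t
example_bind(IN osm_vendor_t * const p_vend,
	     IN osm_mad_pool_t * const p_mad_pool,
	     IN const ib_net64_t port_guid,
	     IN osm_vend_mad_recv_callback_t recv_cb, IN void *context)
{
	osm_bind_info_t bind_info;

	memset(&bind_info, 0, sizeof(bind_info));
	bind_info.port_guid = port_guid;
	bind_info.mad_class = IB_MCLASS_SUBN_ADM;	/* assumed ib_types.h constant */
	bind_info.class_version = 2;
	bind_info.is_responder = TRUE;
	bind_info.send_q_size = 256;
	bind_info.recv_q_size = 256;

	return osm_vendor_bind(p_vend, &bind_info, p_mad_pool, recv_cb, context);
}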
/*******************************************************************************
Feature API: alarmClockCkptInitialize

*******************************************************************************/
SaAisErrorT
alarmClockCkptInitialize (void)
{
    SaAisErrorT  ret_code = SA_AIS_OK;

    if (ckpt_svc_hdl == 0)
    {
        ret_code = saCkptInitialize(&ckpt_svc_hdl, NULL, &ckpt_version);    
        if (ret_code != SA_AIS_OK)
        {
            alarmClockLogWrite(CL_LOG_SEV_ERROR,
                    "alarmClockCkptInitialize(pid=%d): Failed %x\n", 
                    getpid(), ret_code);
        }
    }    
    sessionInit();
    ClRcT rc = clOsalMutexInit(&alarmClockCkptMutex);
    CL_ASSERT(rc == CL_OK);
    ioVecs = clHeapCalloc(MAX_NUM_IOVECS, sizeof(*ioVecs));
    CL_ASSERT(ioVecs != NULL);
    return ret_code;
}
/*----------------------------------------------------------------------------
 *  Cluster Member Eject Callback Handler
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterMemberEjectCallbackHandler(
    CL_IN   ClGmsClusterMemberEjectCallbackDataT* const res)
{
    ClRcT rc = CL_OK;
    struct gms_instance *gms_instance_ptr = NULL;
    ClGmsHandleT gmsHandle = CL_GMS_INVALID_HANDLE;
    
    CL_ASSERT(res != NULL);
    
    gmsHandle = res->gmsHandle;
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        goto error_free_res;
    }

    if (gms_instance_ptr == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NULL_POINTER);
        goto error_free_res;
    }

    
    if (gms_instance_ptr->cluster_manage_callbacks.clGmsMemberEjectCallback == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
        goto error_checkin_free_res;
    }
    
    /*
     * Call the user's callback function with the data.  The user must not
     * free the data we provide; if it needs to retain it, it has to copy
     * it out from what we provide here.
     */
    (*gms_instance_ptr->cluster_manage_callbacks.clGmsMemberEjectCallback)
        (res->reason);
     
error_checkin_free_res:
    if (clHandleCheckin(gmsHandleDb, gmsHandle))
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }


error_free_res:
    clHeapFree((void*)res);
    
    return rc;
}
Example #24
static ib_api_status_t
__osm_ca_info_init(IN osm_vendor_t * const p_vend,
		   IN osm_ca_info_t * const p_ca_info,
		   IN const ib_net64_t ca_guid)
{
	ib_api_status_t status;

	OSM_LOG_ENTER(p_vend->p_log);

	p_ca_info->guid = ca_guid;

	if (osm_log_is_active(p_vend->p_log, OSM_LOG_VERBOSE)) {
		osm_log(p_vend->p_log, OSM_LOG_VERBOSE,
			"__osm_ca_info_init: "
			"Querying CA 0x%" PRIx64 ".\n", cl_ntoh64(ca_guid));
	}

	status = ib_query_ca_by_guid(p_vend->h_al, ca_guid, NULL,
				     &p_ca_info->attr_size);
	if ((status != IB_INSUFFICIENT_MEMORY) && (status != IB_SUCCESS)) {
		osm_log(p_vend->p_log, OSM_LOG_ERROR,
			"__osm_ca_info_init: ERR 3B05: "
			"Unexpected status getting CA attributes (%s).\n",
			ib_get_err_str(status));
		goto Exit;
	}

	CL_ASSERT(p_ca_info->attr_size);

	p_ca_info->p_attr = malloc(p_ca_info->attr_size);
	if (p_ca_info->p_attr == NULL) {
		osm_log(p_vend->p_log, OSM_LOG_ERROR,
			"__osm_ca_info_init: ERR 3B06: "
			"Unable to allocate attribute storage.\n");
		goto Exit;
	}

	status = ib_query_ca_by_guid(p_vend->h_al, ca_guid, p_ca_info->p_attr,
				     &p_ca_info->attr_size);
	if (status != IB_SUCCESS) {
		osm_log(p_vend->p_log, OSM_LOG_ERROR,
			"__osm_ca_info_init: ERR 3B07: "
			"Unexpected status getting CA attributes (%s).\n",
			ib_get_err_str(status));
		goto Exit;
	}

Exit:
	OSM_LOG_EXIT(p_vend->p_log);
	return (status);
}
Example #25
/*
 * Callback to translate quick composite to grow pool constructor callback.
 */
static cl_status_t
__cl_pool_init_cb(IN void **const pp_obj,
		  IN const uint32_t count,
		  IN void *const context,
		  OUT cl_pool_item_t ** const pp_pool_item)
{
	cl_pool_t *p_pool = (cl_pool_t *) context;
	cl_pool_obj_t *p_pool_obj;
	cl_status_t status = CL_SUCCESS;

	CL_ASSERT(p_pool);
	CL_ASSERT(pp_obj);
	CL_ASSERT(count == 1);

	UNUSED_PARAM(count);

	/*
	 * Set our pointer to the list item, which is stored at the beginning of
	 * the first component.
	 */
	p_pool_obj = (cl_pool_obj_t *) * pp_obj;
	*pp_pool_item = &p_pool_obj->pool_item;

	/* Calculate the pointer to the user's first component. */
	*pp_obj = ((uint8_t *) * pp_obj) + sizeof(cl_pool_obj_t);

	/*
	 * Set the object pointer in the pool item to point to the first of the
	 * user's components.
	 */
	p_pool_obj->p_object = *pp_obj;

	/* Invoke the user's constructor callback. */
	if (p_pool->pfn_init)
		status = p_pool->pfn_init(*pp_obj, (void *)p_pool->context);

	return (status);
}
Example #26
/* //////////////////////////////////////////////////////////////////////// */
void
osm_vendor_put(IN osm_bind_handle_t h_bind,
	       IN osm_vend_wrap_t * const p_vend_wrap,
	       IN ib_mad_t * const p_mad)
{

	FSTATUS Status;

	mad_bind_info_t *p_mad_bind_info;
	umadt_obj_t *p_umadt_obj;

	/*  Validate the vendor MAD transport handle. */
	CL_ASSERT(h_bind);
	p_mad_bind_info = (mad_bind_info_t *) h_bind;
	p_umadt_obj = p_mad_bind_info->p_umadt_obj;

	/*  sanity check */
	CL_ASSERT(p_umadt_obj->init_done);
	CL_ASSERT(h_bind);
	CL_ASSERT(__valid_mad_handle(p_mad_bind_info));
	CL_ASSERT(p_vend_wrap);
	/* CL_ASSERT( (ib_mad_t*)&p_vend_wrap->p_madt_struct->IBMad == p_mad ); */

	/*  Release the MAD based on the direction of the MAD */
	if (p_vend_wrap->direction == SEND) {
		/*
		 * For a send, PostSend already released the MAD with Umadt.  Simply
		 * deallocate the local memory that was allocated by the
		 * osm_vendor_get() call.
		 */
		free(p_mad);
#if 0
		Status =
		    p_umadt_obj->uMadtInterface.
		    uMadtReleaseSendMad(p_mad_bind_info->umadt_handle,
					p_vend_wrap->p_madt_struct);
		if (Status != FSUCCESS) {
			/* printf("uMadtReleaseSendMad: Status  = <%d>\n", Status); */
			return;
		}
#endif
	} else if (p_vend_wrap->direction == RECEIVE) {
		CL_ASSERT((ib_mad_t *) & p_vend_wrap->p_madt_struct->IBMad ==
			  p_mad);
		Status =
		    p_umadt_obj->uMadtInterface.
		    uMadtReleaseRecvMad(p_mad_bind_info->umadt_handle,
					p_vend_wrap->p_madt_struct);
		if (Status != FSUCCESS) {
			/* printf("uMadtReleaseRecvMad Status=<%d>\n", Status); */
			return;
		}
	} else {
		return;
	}
	return;
}
ib_api_status_t
__osmv_txnmgr_insert_txn(IN osmv_txn_mgr_t * p_tx_mgr,
			 IN osmv_txn_ctx_t * p_txn, IN uint64_t key)
{
	cl_map_obj_t *p_obj = NULL;
	cl_map_item_t *p_item;
	uint64_t tmp_key;

	CL_ASSERT(p_tx_mgr);
	CL_ASSERT(p_txn);

	key = osmv_txn_get_key(p_txn);
	p_obj = malloc(sizeof(cl_map_obj_t));
	if (NULL == p_obj)
		return IB_INSUFFICIENT_MEMORY;

	osm_log(p_tx_mgr->p_log, OSM_LOG_DEBUG,
		"__osmv_txnmgr_insert_txn: "
		"Inserting key: 0x%llX to map ptr:%p\n", key,
		p_tx_mgr->p_txn_map);

	memset(p_obj, 0, sizeof(cl_map_obj_t));

	cl_qmap_set_obj(p_obj, p_txn);
	/* assuming lookup with this key was made and the result was IB_NOT_FOUND */
	cl_qmap_insert(p_tx_mgr->p_txn_map, key, &p_obj->item);

	p_item = cl_qmap_head(p_tx_mgr->p_txn_map);
	while (p_item != cl_qmap_end(p_tx_mgr->p_txn_map)) {
		tmp_key = cl_qmap_key(p_item);
		osm_log(p_tx_mgr->p_log, OSM_LOG_DEBUG,
			"__osmv_txnmgr_insert_txn: "
			"Found key 0x%llX \n", tmp_key);
		p_item = cl_qmap_next(p_item);
	}

	return IB_SUCCESS;
}
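/*
 * Hedged usage sketch honoring the comment above: insert a transaction only
 * after a lookup for the same key has returned IB_NOT_FOUND, so duplicate keys
 * never reach the map.
 */
static ib_api_status_t
example_txn_track(IN osmv_txn_mgr_t * p_tx_mgr, IN osmv_txn_ctx_t * p_txn)
{
	osmv_txn_ctx_t *p_existing = NULL;
	uint64_t key = osmv_txn_get_key(p_txn);

	if (IB_NOT_FOUND != __osmv_txnmgr_lookup(p_tx_mgr, key, &p_existing))
		return IB_ERROR;	/* the key is already being tracked */

	return __osmv_txnmgr_insert_txn(p_tx_mgr, p_txn, key);
}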
Example #28
size_t
cl_ptr_vector_find_from_end(IN const cl_ptr_vector_t * const p_vector,
			    IN cl_pfn_ptr_vec_find_t pfn_callback,
			    IN const void *const context)
{
	size_t i;

	CL_ASSERT(p_vector);
	CL_ASSERT(p_vector->state == CL_INITIALIZED);
	CL_ASSERT(pfn_callback);

	i = p_vector->size;

	while (i) {
		i--;
		/* Invoke the callback for the current element. */
		if (pfn_callback(i, (void *)p_vector->p_ptr_array[i],
				 (void *)context) == CL_SUCCESS) {
			return (i);
		}
	}

	return (p_vector->size);
}
/*-----------------------------------------------------------------------------
 * Cluster Track Stop API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterTrackStop(
    CL_IN const ClGmsHandleT gmsHandle)
{
    ClRcT                           rc = CL_OK;
    struct gms_instance            *gms_instance_ptr = NULL;
    ClGmsClusterTrackStopRequestT   req = {0};
    ClGmsClusterTrackStopResponseT *res = NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }
    
    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    req.gmsHandle = gmsHandle;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));
    
    CL_ASSERT(rc == CL_OK); /* Should really never happen */
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_track_stop_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_exit;
    }
    
    rc = res->rc;
    
    clHeapFree((void*)res);

error_exit:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

    
    return CL_GMS_RC(rc);
}
Example #30
/*
 * Search for a timer with an earlier timeout than the one provided by
 * the context.  Both the list item and the context are pointers to
 * a cl_timer_t structure with valid timeouts.
 */
static cl_status_t
__cl_timer_find(IN const cl_list_item_t * const p_list_item,
		IN void *const context)
{
	cl_timer_t *p_in_list;
	cl_timer_t *p_new;

	CL_ASSERT(p_list_item);
	CL_ASSERT(context);

	p_in_list = (cl_timer_t *) p_list_item;
	p_new = (cl_timer_t *) context;

	CL_ASSERT(p_in_list->state == CL_INITIALIZED);
	CL_ASSERT(p_new->state == CL_INITIALIZED);

	CL_ASSERT(p_in_list->timer_state == CL_TIMER_QUEUED);

	if (__cl_timer_is_earlier(&p_in_list->timeout, &p_new->timeout))
		return (CL_SUCCESS);

	return (CL_NOT_FOUND);
}