Code Example #1
ib_ctx_handler_collection::ib_ctx_handler_collection()
{
	ibchc_logdbg("");

	/* Read ib table from kernel and save it in local variable. */
	update_tbl();

	//Print table
	print_val_tbl();

	ibchc_logdbg("Done");
}
Code Example #2
ib_ctx_handler_collection::~ib_ctx_handler_collection()
{
	ibchc_logdbg("");

	ib_context_map_t::iterator ib_ctx_iter;
	while ((ib_ctx_iter = m_ib_ctx_map.begin()) != m_ib_ctx_map.end()) {
		ib_ctx_handler* p_ib_ctx_handler = ib_ctx_iter->second;
		delete p_ib_ctx_handler;
		m_ib_ctx_map.erase(ib_ctx_iter);
	}

	ibchc_logdbg("Done");
}
Code Example #3
size_t ib_ctx_handler_collection::mem_reg_on_all_devices(void* addr, size_t length,
                                                         ibv_mr** mr_array, size_t mr_array_sz,
                                                         uint64_t access)
{
	ibchc_logfunc("");
	size_t mr_pos = 0;
	ib_context_map_t::iterator ib_ctx_iter;
	for (ib_ctx_iter = m_ib_ctx_map.begin(); ib_ctx_iter != m_ib_ctx_map.end() && mr_pos < mr_array_sz; ib_ctx_iter++, mr_pos++) {
		ib_ctx_handler* p_ib_ctx_handler = ib_ctx_iter->second;
		mr_array[mr_pos] = p_ib_ctx_handler->mem_reg(addr, length, access);
		BULLSEYE_EXCLUDE_BLOCK_START
		if (mr_array[mr_pos] == NULL) {
			ibchc_logwarn("Failure in mem_reg: addr=%p, length=%d, mr_pos=%d, mr_array[mr_pos]=%d, dev=%p, ibv_dev=%s", 
				    addr, length, mr_pos, mr_array[mr_pos], p_ib_ctx_handler, p_ib_ctx_handler->get_ibv_device()->name);
			return (size_t)-1;
		}
		BULLSEYE_EXCLUDE_BLOCK_END
		errno = 0; // ibv_reg_mr() may set errno=12 (ENOMEM) even when it returns successfully
#ifdef VMA_IBV_ACCESS_ALLOCATE_MR
		if ((access & VMA_IBV_ACCESS_ALLOCATE_MR) != 0) { // contig pages mode
			// With 'IBV_ACCESS_ALLOCATE_MR', ibv_reg_mr() returns an MR whose 'addr' field holds the address of the memory it allocated.
			// The second registration and onward is done with 'IBV_ACCESS_LOCAL_WRITE' using the 'addr' received from the first registration.
			addr = mr_array[0]->addr;
			access &= ~VMA_IBV_ACCESS_ALLOCATE_MR;
		}
#endif

		ibchc_logdbg("addr=%p, length=%d, pos=%d, mr[pos]->lkey=%u, dev1=%p, dev2=%p",
			   addr, length, mr_pos, mr_array[mr_pos]->lkey, mr_array[mr_pos]->context->device, p_ib_ctx_handler->get_ibv_device());
	}
	return mr_pos;
}
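Usage note (not from the original sources): the sketch below shows how a caller might invoke mem_reg_on_all_devices() on an already constructed collection. The `collection` reference, the 4 KB buffer, and the fixed 16-slot mr_array are assumptions made purely for illustration.

#include <cstdio>
#include <cstdlib>
#include <infiniband/verbs.h>
// Assumes the ib_ctx_handler_collection header from this project is also on the include path.

static void register_buffer_on_all_devices(ib_ctx_handler_collection &collection)
{
	const size_t buf_len = 4096;     // hypothetical buffer size
	void *buf = malloc(buf_len);
	if (!buf) {
		return;
	}

	ibv_mr *mr_array[16] = {NULL};   // one MR slot per device; 16 is an arbitrary upper bound

	size_t registered = collection.mem_reg_on_all_devices(buf, buf_len, mr_array, 16,
							       IBV_ACCESS_LOCAL_WRITE);
	if (registered == (size_t)-1) {
		fprintf(stderr, "mem_reg failed on one of the devices\n");
	} else {
		printf("registered %zu memory regions\n", registered);
	}

	free(buf);
}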
Code Example #4
void ib_ctx_handler_collection::map_ib_devices() // Enumerates devices via rdma_get_devices() and creates a handler per device
{
	struct ibv_context** pp_ibv_context_list = rdma_get_devices(&m_n_num_devices);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (!pp_ibv_context_list) {
		ibchc_logwarn("Failure in rdma_get_devices() (error=%d %m)", errno);
		ibchc_logpanic("Please check OFED installation");
	}
	if (!m_n_num_devices) {
		rdma_free_devices(pp_ibv_context_list);
		ibchc_logpanic("No RDMA capable devices found!");
	}
	BULLSEYE_EXCLUDE_BLOCK_END
	ibchc_logdbg("Mapping %d ibv devices", m_n_num_devices);
	for (int i = 0; i < m_n_num_devices; i++) {
		m_ib_ctx_map[pp_ibv_context_list[i]] = new ib_ctx_handler(pp_ibv_context_list[i]);
	}

	rdma_free_devices(pp_ibv_context_list);
}
Code Example #5
void ib_ctx_handler_collection::update_tbl(const char *ifa_name)
{
	struct ibv_device **dev_list = NULL;
	ib_ctx_handler * p_ib_ctx_handler = NULL;
	int num_devices = 0;
	int i;

	ibchc_logdbg("Checking for offload capable IB devices...");

	dev_list = vma_ibv_get_device_list(&num_devices);

	BULLSEYE_EXCLUDE_BLOCK_START
	if (!dev_list) {
		ibchc_logerr("Failure in vma_ibv_get_device_list() (error=%d %m)", errno);
		ibchc_logerr("Please check rdma configuration");
		throw_vma_exception("No IB capable devices found!");
	}
	if (!num_devices) {
		vlog_levels_t _level = ifa_name ? VLOG_DEBUG : VLOG_ERROR; // Print an error only during initialization.
		vlog_printf(_level, "VMA does not detect IB capable devices\n");
		vlog_printf(_level, "No performance gain is expected in current configuration\n");
	}

	BULLSEYE_EXCLUDE_BLOCK_END

	for (i = 0; i < num_devices; i++) {
		struct ib_ctx_handler::ib_ctx_handler_desc desc = {dev_list[i]};

		/* 2. When an interface name is given, skip devices that do not match it (compare by name) */
		if (ifa_name && !check_device_name_ib_name(ifa_name, dev_list[i]->name)) {
			continue;
		}

		if (ib_ctx_handler::is_mlx4(dev_list[i]->name)) {
			// Note: mlx4 does not support socketxtreme mode.
			if (safe_mce_sys().enable_socketxtreme) {
				ibchc_logdbg("Blocking offload: mlx4 interfaces in socketxtreme mode");
				continue;
			}

			// Check whether mlx4 steering creation is supported.
			// These settings are passed to the VM by the hypervisor, so there is no need to specify the parameter on the VM.
			if (safe_mce_sys().hypervisor == mce_sys_var::HYPER_NONE) {
				check_flow_steering_log_num_mgm_entry_size();
			}
		}

		/* 3. Add new ib devices */
		p_ib_ctx_handler = new ib_ctx_handler(&desc);
		if (!p_ib_ctx_handler) {
			ibchc_logerr("failed allocating new ib_ctx_handler (errno=%d %m)", errno);
			continue;
		}
		m_ib_ctx_map[p_ib_ctx_handler->get_ibv_device()] = p_ib_ctx_handler;
	}

	ibchc_logdbg("Check completed. Found %d offload capable IB devices", m_ib_ctx_map.size());

	if (dev_list) {
		ibv_free_device_list(dev_list);
	}
}
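Usage note (not from the original sources): update_tbl() covers both the initial device scan (no interface name, where the "no IB capable devices" message is logged at VLOG_ERROR) and later per-interface refreshes (where that message drops to VLOG_DEBUG). A hypothetical caller, assuming a `collection` instance and an interface named "eth2", might look like:

	collection.update_tbl();        // initial scan of all IB devices
	collection.update_tbl("eth2");  // refresh limited to devices matching the given interface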