Esempio n. 1
0
/*
 * ib_get_cached_pkey - read one P_Key from the per-port software cache.
 * @device:   device whose cache is consulted
 * @port_num: physical port number (validated against the device's range)
 * @index:    slot in the port's P_Key table
 * @pkey:     output; written only on success
 *
 * Returns 0 on success, -EINVAL if @port_num or @index is out of range.
 * The lookup runs under the device cache read lock with IRQs saved.
 */
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *port_cache;
	unsigned long flags;
	int err = -EINVAL;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	port_cache = device->cache.pkey_cache[port_num - start_port(device)];

	/* Copy the entry out only when the index is within the table. */
	if (index >= 0 && index < port_cache->table_len) {
		*pkey = port_cache->table[index];
		err = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return err;
}
Esempio n. 2
0
/*
 * ttt_deinit_io - tear down console and port I/O in reverse init order:
 * console I/O first (only built on Unix/BeOS, never MS-DOS), then the
 * port layer.
 */
void ttt_deinit_io(void)
{
#if     (defined __unix__ || defined __BEOS__) && !defined __MSDOS__
  deinit_conio();
#endif
  end_port();
  deinit_port();
}
Esempio n. 3
0
/*
 * ib_cache_setup_one - allocate and populate the per-port P_Key and GID
 * caches for @device, then register the event handler that keeps them
 * fresh.
 *
 * On any failure the function releases everything it allocated and
 * NULLs the cache pointers so a later cleanup pass cannot double-free.
 */
static void ib_cache_setup_one(struct ib_device *device)
{
    int p;

    rwlock_init(&device->cache.lock);

    device->cache.pkey_cache =
        kmalloc(sizeof *device->cache.pkey_cache *
                (end_port(device) - start_port(device) + 1), GFP_KERNEL);
    /* Fixed copy-paste: size the GID array by its own element type,
     * not by *pkey_cache (both are pointers today, but the expression
     * must track the field it allocates). */
    device->cache.gid_cache =
        kmalloc(sizeof *device->cache.gid_cache *
                (end_port(device) - start_port(device) + 1), GFP_KERNEL);

    if (!device->cache.pkey_cache || !device->cache.gid_cache) {
        printk(KERN_WARNING "Couldn't allocate cache "
               "for %s\n", device->name);
        goto err;
    }

    for (p = 0; p <= end_port(device) - start_port(device); ++p) {
        device->cache.pkey_cache[p] = NULL;
        device->cache.gid_cache [p] = NULL;
        /* Fill the slot for this port from the hardware tables. */
        ib_cache_update(device, p + start_port(device));
    }

    INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                          device, ib_cache_event);
    if (ib_register_event_handler(&device->cache.event_handler))
        goto err_cache;

    return;

err_cache:
    for (p = 0; p <= end_port(device) - start_port(device); ++p) {
        kfree(device->cache.pkey_cache[p]);
        kfree(device->cache.gid_cache[p]);
    }

err:
    /* kfree(NULL) is a no-op, so the partial-allocation path is safe.
     * Clear the pointers so ib_cache_cleanup_one() sees a dead cache. */
    kfree(device->cache.pkey_cache);
    kfree(device->cache.gid_cache);
    device->cache.pkey_cache = NULL;
    device->cache.gid_cache  = NULL;
}
Esempio n. 4
0
/*
 * ib_get_cached_lmc - fetch the cached LMC value for @port_num.
 * @lmc is written under the cache mutex; no per-index validation is
 * needed because the LMC cache is a flat per-port array.
 *
 * Returns 0 on success, -EINVAL for a port outside the device's range.
 */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	mtx_lock(&device->cache.lock);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	mtx_unlock(&device->cache.lock);

	return 0;
}
Esempio n. 5
0
/*
 * ib_cache_cleanup_one - undo ib_cache_setup_one for @device: stop the
 * event handler, drain any queued cache-update work, then free every
 * per-port table and finally the top-level arrays themselves.
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
    int port;

    ib_unregister_event_handler(&device->cache.event_handler);
    /* Pending ib_cache_update work may still reference the tables. */
    flush_scheduled_work();

    for (port = 0; port <= end_port(device) - start_port(device); ++port) {
        kfree(device->cache.gid_cache[port]);
        kfree(device->cache.pkey_cache[port]);
    }

    kfree(device->cache.gid_cache);
    kfree(device->cache.pkey_cache);
}
Esempio n. 6
0
/*
 * ib_get_cached_lmc - read the cached LMC for @port_num under the
 * cache read lock (IRQ-safe variant).
 *
 * Returns 0 on success, -EINVAL for a port outside the device's range.
 */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
Esempio n. 7
0
/*
 * ib_cache_cleanup_one - FreeBSD teardown for the device cache: stop
 * event delivery, free the per-port P_Key/GID tables, then the
 * top-level arrays and the LMC cache.
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int port;

	ib_unregister_event_handler(&device->cache.event_handler);
#ifdef XXX
	flush_scheduled_work();
#endif

	/* Release each port's tables before the arrays that index them. */
	for (port = 0; port <= end_port(device) - start_port(device); ++port) {
		free(device->cache.pkey_cache[port], M_DEVBUF);
		free(device->cache.gid_cache[port], M_DEVBUF);
	}

	free(device->cache.pkey_cache, M_DEVBUF);
	free(device->cache.gid_cache, M_DEVBUF);
	free(device->cache.lmc_cache, M_DEVBUF);
}
Esempio n. 8
0
/*
 * ib_find_cached_pkey - locate @pkey in the port's cached P_Key table.
 * @index receives the matching slot (or -1 if none found).
 *
 * A full-membership entry (bit 15 set) wins immediately; otherwise the
 * first limited-membership entry with the same 15-bit key value is
 * remembered and used as a fallback.
 *
 * Returns 0 when a slot was found, -ENOENT when the key is absent,
 * -EINVAL for a port outside the device's range.
 */
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int slot;
	int ret = -ENOENT;
	int limited_ix = -1;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	if (!device->cache.pkey_cache)
		goto out;

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	if (!cache)
		goto out;

	for (slot = 0; slot < cache->table_len; ++slot) {
		/* Compare only the 15-bit key, ignoring the membership bit. */
		if ((cache->table[slot] & 0x7fff) != (pkey & 0x7fff))
			continue;
		if (cache->table[slot] & 0x8000) {
			/* Full member: take it and stop searching. */
			*index = slot;
			ret = 0;
			break;
		}
		/* Limited member: remember it, keep looking for a full one. */
		limited_ix = slot;
	}

	if (ret == -ENOENT && limited_ix >= 0) {
		*index = limited_ix;
		ret = 0;
	}
out:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
Esempio n. 9
0
/*
 * ib_cache_cleanup_one - guarded teardown: a device whose cache setup
 * failed (any cache array NULL) is skipped entirely. Otherwise stop
 * event delivery, drain ib_wq, and free the per-port tables followed
 * by the top-level arrays.
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int port;

	/* Nothing to do if setup never completed. */
	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache)
		return;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (port = 0; port <= end_port(device) - start_port(device); ++port) {
		kfree(device->cache.pkey_cache[port]);
		kfree(device->cache.gid_cache[port]);
	}

	kfree(device->cache.lmc_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.pkey_cache);
}
Esempio n. 10
0
/*
 * ib_get_cached_gid - return the GID at @index for @port_num.
 *
 * The per-port GID cache is sparse: each entry records the hardware
 * table index it mirrors. On a cache miss the GID is fetched directly
 * via ib_query_gid() while still holding the cache read lock.
 *
 * Returns 0 on success, -EINVAL for a bad port or index, or the error
 * from ib_query_gid() on a failed fallback query.
 */
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int slot, ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Scan the sparse cache for an entry mirroring this index. */
	for (slot = 0; slot < cache->table_len; ++slot) {
		if (cache->entry[slot].index == index) {
			*gid = cache->entry[slot].gid;
			goto out_unlock;
		}
	}

	/* Not cached: query the hardware table directly. */
	ret = ib_query_gid(device, port_num, index, gid);
	if (ret)
		printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
		       ret, device->name, index);

out_unlock:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
Esempio n. 11
0
/*
 * ib_get_cached_gid - return the cached GID at @index for @port_num
 * (FreeBSD variant; the GID table here is dense, indexed directly).
 *
 * Returns 0 on success, -EINVAL for a bad port or index. The lookup
 * runs under the device cache mutex.
 */
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *port_cache;
	int err = -EINVAL;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	mtx_lock(&device->cache.lock);

	port_cache = device->cache.gid_cache[port_num - start_port(device)];

	if (index >= 0 && index < port_cache->table_len) {
		*gid = port_cache->table[index];
		err = 0;
	}

	mtx_unlock(&device->cache.lock);

	return err;
}