Example #1
0
/*
 * ib_find_exact_cached_pkey - find the table index of an exact P_Key match.
 * @device:   device whose pkey cache is searched
 * @port_num: port number, validated against [start_port, end_port]
 * @pkey:     full 16-bit P_Key to match exactly (membership bit included)
 * @index:    out: table index of the match; preset to -1 before searching
 *
 * Returns 0 on a hit, -ENOENT when the cache is absent or no entry
 * matches, -EINVAL for an out-of-range port.  The table is read under
 * the device cache rwlock with interrupts saved.
 */
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	/* Cache may not be set up yet (or allocation failed) - bail out. */
	if (!device->cache.pkey_cache)
		goto out;

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	if (!cache)
		goto out;

	/* Exact comparison: unlike ib_find_cached_pkey() no 0x7fff mask,
	 * so full vs. limited membership variants are distinct here. */
	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}
out:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
/*
 * _roce_gid_cache_find_gid - search every Ethernet port's RoCE GID cache
 * for @gid, comparing only the attribute fields selected by @mask against
 * @val.  On success returns 0 and optionally reports the matching port
 * (converted back to the device's port numbering) and table index;
 * returns -ENOENT when no cache exists or nothing matches.
 *
 * NOTE(review): no lock is taken here; presumably find_gid() provides
 * its own consistency guarantees (cf. the seq-based reads in
 * roce_gid_cache_get_gid()) - confirm before relying on this.
 */
static int _roce_gid_cache_find_gid(struct ib_device *ib_dev, union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_roce_gid_cache *cache;
	u8 p;
	int local_index;

	if (!ib_dev->cache.roce_gid_cache)
		return -ENOENT;

	/* p is a zero-based port index; only RoCE (Ethernet) ports have
	 * a GID cache, IB ports are skipped. */
	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		if (rdma_port_get_link_layer(ib_dev, p + start_port(ib_dev)) !=
		    IB_LINK_LAYER_ETHERNET)
			continue;
		cache = ib_dev->cache.roce_gid_cache[p];
		if (!cache || !cache->active)
			continue;
		local_index = find_gid(cache, gid, val, mask);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + start_port(ib_dev);
			return 0;
		}
	}

	return -ENOENT;
}
Example #3
0
/*
 * ib_find_cached_gid - find the port and index holding @gid.
 * @device:   device whose gid cache is searched
 * @gid:      GID to look for (full byte-wise comparison)
 * @port_num: out: port number of the match; preset to -1
 * @index:    out (optional): table index of the match; preset to -1
 *
 * Returns 0 on a hit, -ENOENT otherwise.  Reads under the device cache
 * rwlock with interrupts saved.
 */
int ib_find_cached_gid(struct ib_device *device,
		       union ib_gid	*gid,
		       u8               *port_num,
		       u16              *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	/* Guard against a missing cache (setup not run yet, or its
	 * allocation failed), matching the pkey lookup helpers. */
	if (!device->cache.gid_cache)
		goto found;

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		if (!cache)
			continue;
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
Example #4
0
/*
 * ib_get_cached_pkey - read the P_Key stored at absolute @index.
 *
 * This variant uses a sparse table of (index, pkey) entry pairs rather
 * than a dense array: the loop searches for an entry whose recorded
 * index equals the requested one.  If @index is within range but no
 * entry carries it, *pkey is reported as the invalid P_Key 0x0000 and
 * 0 is still returned.  -EINVAL for a bad port or out-of-range index.
 */
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i, ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Scan the sparse entry list for the requested absolute index. */
	for (i = 0; i < cache->table_len; ++i)
		if (cache->entry[i].index == index)
			break;

	if (i < cache->table_len)
		*pkey = cache->entry[i].pkey;
	else
		*pkey = 0x0000;	/* hole in the table: invalid P_Key */

out_unlock:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
/*
 * roce_gid_cache_find_gid_by_port - look up @gid on one specific port.
 *
 * Matches on GID type always; additionally matches on the net device
 * resolved from (@net, @if_index) when get_netdev_from_ifindex() adds
 * the netdev bit to @mask.  Returns 0 and the table index via @index
 * (if non-NULL), or -ENOENT for a bad/inactive port or no match.
 */
int roce_gid_cache_find_gid_by_port(struct ib_device *ib_dev, union ib_gid *gid,
				    enum ib_gid_type gid_type, u8 port,
				    struct net *net, int if_index, u16 *index)
{
	int local_index;
	struct ib_roce_gid_cache *cache;
	unsigned long mask = GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.gid_type = gid_type};

	if (!ib_dev->cache.roce_gid_cache || port < start_port(ib_dev) ||
	    port >= (start_port(ib_dev) + ib_dev->phys_port_cnt))
		return -ENOENT;

	cache = ib_dev->cache.roce_gid_cache[port - start_port(ib_dev)];
	if (!cache || !cache->active)
		return -ENOENT;

	/* May widen the match criteria with GID_ATTR_FIND_MASK_NETDEV. */
	mask |= get_netdev_from_ifindex(net, if_index, &val);

	local_index = find_gid(cache, gid, &val, mask);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		return 0;
	}

	return -ENOENT;
}
Example #6
0
/*
 * ib_get_cached_pkey - fetch the cached P_Key at @index for @port_num.
 *
 * Returns 0 and stores the key through @pkey, or -EINVAL when the port
 * or index is out of range or the cache is not present.  The table is
 * read under the device cache rwlock with interrupts saved.
 */
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *tbl;
	unsigned long flags;
	int err = -EINVAL;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	do {
		if (!device->cache.pkey_cache)
			break;
		tbl = device->cache.pkey_cache[port_num - start_port(device)];
		if (!tbl)
			break;
		if (index < 0 || index >= tbl->table_len)
			break;
		*pkey = tbl->table[index];
		err = 0;
	} while (0);

	read_unlock_irqrestore(&device->cache.lock, flags);

	return err;
}
Example #7
0
/*
 * ib_get_cached_gid - copy the cached GID at @index for @port_num.
 *
 * Returns 0 and stores the GID through @gid, or -EINVAL for an
 * out-of-range port or index.  Reads under the device cache rwlock
 * with interrupts saved.
 */
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *tbl;
	unsigned long flags;
	int err = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	tbl = device->cache.gid_cache[port_num - start_port(device)];

	if (index >= 0 && index < tbl->table_len)
		*gid = tbl->table[index];
	else
		err = -EINVAL;

	read_unlock_irqrestore(&device->cache.lock, flags);

	return err;
}
Example #8
0
/*
 * ib_find_cached_pkey - find the index of a P_Key, ignoring membership.
 *
 * Comparison masks off bit 15 (full/limited membership), so either
 * membership variant of @pkey matches.  Returns 0 with the index via
 * @index (preset to -1), -ENOENT when no match, -EINVAL for a bad port.
 */
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	/* 0x7fff strips the membership bit from both sides. */
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
Example #9
0
/*
 * ib_find_cached_pkey - membership-agnostic P_Key lookup (mutex variant).
 *
 * Same semantics as the rwlock version: bit 15 (membership) is masked
 * off before comparing, *index is preset to -1, and the result is 0 on
 * a hit, -ENOENT on miss, -EINVAL for an out-of-range port.  This port
 * of the code serializes with a sleepable mutex instead of an IRQ-safe
 * rwlock.
 */
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	mtx_lock(&device->cache.lock);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			*index = i;
			ret = 0;
			break;
		}

	mtx_unlock(&device->cache.lock);

	return ret;
}
Example #10
0
/* Register console for printk's, etc.
 * Brings up the debug serial port (and the KGDB port when configured).
 * Always reports success.  NOTE(review): `port` / `kgdb_port` are
 * file-scope state defined elsewhere - not visible here. */
int __init
init_etrax_debug(void)
{
        start_port(port);

#ifdef CONFIG_ETRAX_KGDB
	start_port(kgdb_port);
#endif /* CONFIG_ETRAX_KGDB */
	return 0;
}
Example #11
0
/*
 * ib_get_cached_lmc - read the cached LMC value for @port_num.
 *
 * Returns 0 and stores the LMC through @lmc, or -EINVAL for an
 * out-of-range port.  The cache entry is read under the device
 * cache mutex.
 */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	u8 cached;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	mtx_lock(&device->cache.lock);
	cached = device->cache.lmc_cache[port_num - start_port(device)];
	mtx_unlock(&device->cache.lock);

	*lmc = cached;
	return 0;
}
Example #12
0
/*
 * console_write - console driver write hook for the debug port.
 *
 * Under CONFIG_SVINTO_SIM the whole body short-circuits into the
 * simulator output macro; under CONFIG_ETRAX_KGDB output is tunneled
 * through the gdb protocol instead of raw port writes.  Otherwise the
 * registered debug_write_function (if any) is preferred over direct
 * port output.  NOTE(review): start_port() is called with no argument
 * here, unlike start_port(port) elsewhere - confirm which prototype is
 * in effect in this build.
 */
static void
console_write(struct console *co, const char *buf, unsigned int len)
{
	if (!port)
		return;

#ifdef CONFIG_SVINTO_SIM
	/* no use to simulate the serial debug output */
	SIMCOUT(buf, len);
	return;
#endif

	start_port();

#ifdef CONFIG_ETRAX_KGDB
	/* kgdb needs to output debug info using the gdb protocol */
	putDebugString(buf, len);
	return;
#endif

	if (debug_write_function)
		debug_write_function(co->index, buf, len);
	else
		console_write_direct(co, buf, len);
}
/*
 * roce_sync_all_netdev_gids - drop cache entries whose netdev is gone.
 *
 * Walks @port's RoCE GID cache and zeroes (via write_gid with
 * zgid/zattr) every entry whose net device is not present in @list,
 * i.e. keeps only GIDs belonging to currently known netdevs.  Returns
 * 0 when there is nothing to do or on completion, -ENOSYS when the
 * port has no active cache.  The per-cache mutex serializes writers.
 */
int roce_sync_all_netdev_gids(struct ib_device *ib_dev, u8 port,
			      struct list_head *list)
{
	struct ib_roce_gid_cache *cache;
	int ix;

	if (!ib_dev->cache.roce_gid_cache)
		return 0;

	cache  = ib_dev->cache.roce_gid_cache[port - start_port(ib_dev)];

	if (!cache || !cache->active)
		return -ENOSYS;

	mutex_lock(&cache->lock);

	for (ix = 0; ix < cache->sz; ix++) {
		bool found = false;
		struct roce_netdev_list *entry;

		list_for_each_entry(entry, list, list) {
			if (cache->data_vec[ix].attr.ndev == entry->ndev) {
				found = true;
				break;
			}
		}
		if (!found)
			write_gid(ib_dev, port, cache, ix, &zgid, &zattr);
	}

	mutex_unlock(&cache->lock);
	return 0;
}
Example #14
0
/*
 * ib_get_cached_lmc - read the cached LMC value for @port_num.
 *
 * Returns 0 and stores the LMC through @lmc, or -EINVAL for an
 * out-of-range port.  The lmc_cache slot is read under the device
 * cache rwlock with interrupts saved.
 */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int slot;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	slot = port_num - start_port(device);

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[slot];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
/*
 * roce_del_gid - remove @gid from @port's RoCE GID cache.
 *
 * The entry is located by GID type and net device, then overwritten
 * with the zero GID/attributes.  Deleting a default GID is refused
 * with -EPERM; an inactive/missing cache yields -ENOSYS; a completely
 * absent cache array is treated as success (nothing to delete).
 * Note that a lookup miss is NOT reported - the function returns 0
 * whether or not an entry was found and cleared.
 */
int roce_del_gid(struct ib_device *ib_dev, u8 port,
		 union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_roce_gid_cache *cache;
	int ix;

	if (!ib_dev->cache.roce_gid_cache)
		return 0;

	cache  = ib_dev->cache.roce_gid_cache[port - start_port(ib_dev)];

	if (!cache || !cache->active)
		return -ENOSYS;

	if (attr->ndev) {
		/* Deleting default GIDs in not permitted */
		if (rdma_is_default_gid(attr->ndev, gid,
		    roce_v1_noncompat_gid ? true : false))
			return -EPERM;
	}

	mutex_lock(&cache->lock);

	ix = find_gid(cache, gid, attr,
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV);
	if (ix < 0)
		goto out_unlock;

	write_gid(ib_dev, port, cache, ix, &zgid, &zattr);

out_unlock:
	mutex_unlock(&cache->lock);
	return 0;
}
Example #16
0
/*
 * ib_find_cached_pkey - find a P_Key index, preferring full membership.
 *
 * Compares with the membership bit (bit 15) masked off.  A match whose
 * table entry has full membership (0x8000 set) wins immediately; a
 * limited-membership match is remembered in @partial_ix and used only
 * if no full-membership match exists.  Returns 0 with the index via
 * @index (preset to -1), -ENOENT on miss or absent cache, -EINVAL for
 * a bad port.  Reads under the device cache rwlock, IRQs saved.
 */
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	if (!device->cache.pkey_cache)
		goto out;

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	if (!cache)
		goto out;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				/* Full member - best possible match. */
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	/* Fall back to the limited-membership match, if any. */
	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}
out:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
Example #17
0
/*
 * ib_cache_setup_one - allocate and populate the per-port pkey/gid
 * cache arrays for @device, then register the event handler that keeps
 * them current.  On any failure everything allocated here is freed and
 * the function returns silently (callers must tolerate a NULL cache).
 */
static void ib_cache_setup_one(struct ib_device *device)
{
    int p;

    rwlock_init(&device->cache.lock);

    device->cache.pkey_cache =
        kmalloc(sizeof *device->cache.pkey_cache *
                (end_port(device) - start_port(device) + 1), GFP_KERNEL);
    /* Size the GID array from its own element type, not the pkey
     * cache's (the old code copy-pasted *device->cache.pkey_cache
     * here; both are pointers so it happened to work, but it would
     * break silently if either type changed). */
    device->cache.gid_cache =
        kmalloc(sizeof *device->cache.gid_cache *
                (end_port(device) - start_port(device) + 1), GFP_KERNEL);

    if (!device->cache.pkey_cache || !device->cache.gid_cache) {
        printk(KERN_WARNING "Couldn't allocate cache "
               "for %s\n", device->name);
        goto err;
    }

    /* NULL every slot first so a failed/partial update leaves the
     * error paths safe to kfree(). */
    for (p = 0; p <= end_port(device) - start_port(device); ++p) {
        device->cache.pkey_cache[p] = NULL;
        device->cache.gid_cache [p] = NULL;
        ib_cache_update(device, p + start_port(device));
    }

    INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                          device, ib_cache_event);
    if (ib_register_event_handler(&device->cache.event_handler))
        goto err_cache;

    return;

err_cache:
    for (p = 0; p <= end_port(device) - start_port(device); ++p) {
        kfree(device->cache.pkey_cache[p]);
        kfree(device->cache.gid_cache[p]);
    }

err:
    kfree(device->cache.pkey_cache);
    kfree(device->cache.gid_cache);
}
/*
 * roce_add_gid - insert @gid into @port's RoCE GID cache.
 *
 * Refuses the all-zero GID (-EINVAL) and, when the device can report
 * its own netdev, refuses to add a default GID on behalf of a foreign
 * netdev (-EPERM).  If an identical (gid, type, netdev) entry already
 * exists the call is an idempotent success.  Otherwise the first free
 * (zero) slot is claimed via write_gid(); -ENOSPC when the table is
 * full, -ENOSYS when no active cache exists.  The per-cache mutex
 * serializes writers.
 */
int roce_add_gid(struct ib_device *ib_dev, u8 port,
		 union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_roce_gid_cache *cache;
	int ix;
	int ret = 0;
	struct net_device *idev;

	if (!ib_dev->cache.roce_gid_cache)
		return -ENOSYS;

	cache = ib_dev->cache.roce_gid_cache[port - start_port(ib_dev)];

	if (!cache || !cache->active)
		return -ENOSYS;

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	/* RCU protects the netdev returned by get_netdev(). */
	if (ib_dev->get_netdev) {
		rcu_read_lock();
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev)
			/* Adding default GIDs in not permitted */
			if (rdma_is_default_gid(idev, gid,
			    roce_v1_noncompat_gid ? true : false)) {
				rcu_read_unlock();
				return -EPERM;
			}
		rcu_read_unlock();
	}

	mutex_lock(&cache->lock);

	/* Already present?  Nothing to do. */
	ix = find_gid(cache, gid, attr, GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV);
	if (ix >= 0)
		goto out_unlock;

	/* Claim the first empty slot (mask 0 matches only zgid). */
	ix = find_gid(cache, &zgid, NULL, 0);
	if (ix < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	write_gid(ib_dev, port, cache, ix, gid, attr);

out_unlock:
	mutex_unlock(&cache->lock);
	return ret;
}
Example #19
0
/*
 * ib_get_cached_gid - read the GID at absolute @index (sparse variant).
 *
 * The cache stores sparse (index, gid) entry pairs; the loop searches
 * for an entry recorded under the requested absolute index.  On a
 * cache miss the GID is fetched from the hardware via ib_query_gid()
 * instead, with a warning if that also fails.  -EINVAL for a bad port
 * or an index outside [0, table_len).
 */
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int i, ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Scan the sparse entry list for the requested absolute index. */
	for (i = 0; i < cache->table_len; ++i)
		if (cache->entry[i].index == index)
			break;

	if (i < cache->table_len)
		*gid = cache->entry[i].gid;
	else {
		/* Not cached: fall back to querying the device.
		 * NOTE(review): this runs while holding a read_lock with
		 * IRQs off - confirm ib_query_gid() cannot sleep here. */
		ret = ib_query_gid(device, port_num, index, gid);
		if (ret)
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, index);
	}

out_unlock:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
/*
 * init_etrax_debug - register the debug serial console(s).
 *
 * First invocation registers the combined console and starts the debug
 * port (plus the KGDB port when configured).  Any later invocation
 * instead swaps the combined console for the four per-port consoles
 * and a dummy console.  Always reports success.
 */
int __init
init_etrax_debug(void)
{
	static int first = 1;

	if (!first) {
		/* Second call: switch to per-port consoles. */
		unregister_console(&sercons);
		register_console(&sercons0);
		register_console(&sercons1);
		register_console(&sercons2);
		register_console(&sercons3);
                init_dummy_console();
		return 0;
	}

	first = 0;
	register_console(&sercons);
	start_port(port);
#ifdef CONFIG_ETRAX_KGDB
	start_port(kgdb_port);
#endif
	return 0;
}
Example #21
0
/*
 * ib_cache_cleanup_one - tear down @device's pkey/gid caches.
 *
 * Unregisters the cache event handler and flushes pending work first,
 * so no updater can touch the tables while (or after) they are freed.
 * Per-port tables are freed before the arrays that hold them.
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
    int p;

    ib_unregister_event_handler(&device->cache.event_handler);
    flush_scheduled_work();

    for (p = 0; p <= end_port(device) - start_port(device); ++p) {
        kfree(device->cache.pkey_cache[p]);
        kfree(device->cache.gid_cache[p]);
    }

    kfree(device->cache.pkey_cache);
    kfree(device->cache.gid_cache);
}
Example #22
0
/*
 * ib_get_cached_gid - copy the cached GID at @index for @port_num
 * (mutex-serialized variant).
 *
 * Returns 0 and stores the GID through @gid, or -EINVAL for an
 * out-of-range port or index.
 */
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *tbl;
	int err = -EINVAL;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	mtx_lock(&device->cache.lock);

	tbl = device->cache.gid_cache[port_num - start_port(device)];
	if (index >= 0 && index < tbl->table_len) {
		*gid = tbl->table[index];
		err = 0;
	}

	mtx_unlock(&device->cache.lock);

	return err;
}
Example #23
0
/*
 * raw_printk - printf-style output straight to the debug serial port,
 * bypassing the console layer.  Formats into a static 1 KiB buffer
 * (truncating longer messages) and returns vsnprintf()'s result.
 * Not reentrant: the buffer and the `first` latch are unsynchronized
 * static state.
 */
int raw_printk(const char *fmt, ...)
{
	static char buf[1024];
	int printed_len;
	static int first = 1;
	va_list args;

	if (first) {
		/* Force reinitialization of the port to get manual mode. */
		port->started = 0;
		start_port(port);
		first = 0;
	}

	va_start(args, fmt);
	printed_len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* vsnprintf() can fail with a negative result, leaving buf
	 * indeterminate - running strlen() over it then is undefined
	 * behavior, so bail out early. */
	if (printed_len < 0)
		return printed_len;

	console_write_direct(NULL, buf, strlen(buf));
	return printed_len;
}
Example #24
0
/*
 * ib_cache_cleanup_one - tear down @device's caches (FreeBSD port).
 *
 * Unregisters the cache event handler, then frees each per-port table
 * before the arrays themselves plus the lmc cache.  The work-queue
 * flush from the Linux original is compiled out here (XXX marker).
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
#ifdef XXX
	flush_scheduled_work();
#endif

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		free(device->cache.pkey_cache[p], M_DEVBUF);
		free(device->cache.gid_cache[p], M_DEVBUF);
	}

	free(device->cache.pkey_cache, M_DEVBUF);
	free(device->cache.gid_cache, M_DEVBUF);
	free(device->cache.lmc_cache, M_DEVBUF);
}
Example #25
0
/*
 * ib_cache_setup_one - allocate and populate the per-port pkey/gid/lmc
 * caches for @device (FreeBSD port) and register the event handler
 * that keeps them current.  Allocations are M_NOWAIT; on any failure
 * everything allocated here is freed and the function returns silently,
 * so callers must tolerate NULL cache pointers.
 */
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	mtx_init(&device->cache.lock, "ib device cache", NULL, 
		MTX_DUPOK|MTX_DEF);

	device->cache.pkey_cache =
		malloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), M_DEVBUF, 
			M_NOWAIT);
	device->cache.gid_cache =
		malloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), M_DEVBUF, 
			M_NOWAIT);

	device->cache.lmc_cache = malloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					  start_port(device) + 1),
					  M_DEVBUF, M_NOWAIT);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		log(LOG_WARNING, "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	/* NULL the slots first so the error paths can free() safely. */
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		free(device->cache.pkey_cache[p], M_DEVBUF);
		free(device->cache.gid_cache[p], M_DEVBUF);
	}

err:
	free(device->cache.pkey_cache, M_DEVBUF);
	free(device->cache.gid_cache, M_DEVBUF);
	free(device->cache.lmc_cache, M_DEVBUF);
}
Example #26
0
/*
 * ib_cache_cleanup_one - tear down @device's pkey/gid/lmc caches.
 *
 * A no-op unless all three cache pointers were allocated (setup either
 * fully succeeded or cleaned up after itself).  Unregisters the event
 * handler and flushes ib_wq before freeing, so no updater can race
 * with the kfree()s below.
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	if (!(device->cache.pkey_cache && device->cache.gid_cache &&
	      device->cache.lmc_cache))
		return;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}
/*
 * console_setup - parse "console=" options of the form <baud><parity><bits>
 * (e.g. "115200N8") for the debug serial console.  Defaults are
 * 115200 baud, no parity, 8 bits; each field present in @options
 * overrides its default.  Always returns 0.
 */
static int __init
console_setup(struct console *co, char *options)
{
	char *s;

	if (options) {
		port = &ports[co->index];
		/* Defaults - overridden only by fields actually present. */
		port->baudrate = 115200;
		port->parity = 'N';
		port->bits = 8;
		/* Only overwrite the default baud rate when the option
		 * string actually starts with digits; the old code
		 * unconditionally assigned simple_strtoul()'s result,
		 * which is 0 for a string like "N8", clobbering the
		 * 115200 default with an invalid rate. */
		if (*options >= '0' && *options <= '9')
			port->baudrate = simple_strtoul(options, NULL, 10);
		s = options;
		while (*s >= '0' && *s <= '9')
			s++;
		if (*s) port->parity = *s++;
		if (*s) port->bits   = *s++ - '0';
		port->started = 0;
		start_port(0);
	}
	return 0;
}
/*
 * roce_gid_cache_get_gid - lock-free read of one RoCE GID cache entry.
 *
 * Uses a seqcount-style protocol: the per-entry sequence number is
 * sampled before and after copying the entry into local storage, with
 * smp_rmb() barriers ordering the reads.  If the sequence changed (or
 * is -1, which presumably marks an entry mid-update by a writer -
 * confirm against write_gid()), the read raced a writer and -EAGAIN
 * tells the caller to retry.  -EINVAL/-ENOSYS for a bad index or a
 * missing/inactive cache.
 */
int roce_gid_cache_get_gid(struct ib_device *ib_dev, u8 port, int index,
			   union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_roce_gid_cache *cache;
	union ib_gid local_gid;
	struct ib_gid_attr local_attr;
	unsigned int orig_seq;

	if (!ib_dev->cache.roce_gid_cache)
		return -EINVAL;

	cache = ib_dev->cache.roce_gid_cache[port - start_port(ib_dev)];

	if (!cache || !cache->active)
		return -ENOSYS;

	if (index < 0 || index >= cache->sz)
		return -EINVAL;

	orig_seq = ACCESS_ONCE(cache->data_vec[index].seq);
	/* Make sure we read the sequence number before copying the
	 * gid to local storage. */
	smp_rmb();

	memcpy(&local_gid, &cache->data_vec[index].gid, sizeof(local_gid));
	memcpy(&local_attr, &cache->data_vec[index].attr, sizeof(local_attr));
	/* Ensure the local copy completed reading before verifying
	 * the new sequence number. */
	smp_rmb();

	if (orig_seq == -1 ||
	    orig_seq != ACCESS_ONCE(cache->data_vec[index].seq))
		return -EAGAIN;

	/* Consistent snapshot: publish it to the caller. */
	memcpy(gid, &local_gid, sizeof(*gid));
	if (attr)
		memcpy(attr, &local_attr, sizeof(*attr));
	return 0;
}
/*
 * roce_gid_cache_setup_one - allocate per-port RoCE GID caches.
 *
 * Requires the driver to implement modify_gid (-ENOSYS otherwise).
 * Allocates the zero-initialized array of per-port cache pointers,
 * then a cache for every Ethernet port (IB ports keep a NULL slot).
 * On allocation failure all ports are rolled back and -ENOMEM is
 * returned.  NOTE(review): the setup loop iterates a 0-based port
 * index while the rollback loop iterates 1..phys_port_cnt; presumably
 * free_roce_gid_cache() takes a 1-based port number - confirm its
 * definition.
 */
static int roce_gid_cache_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	int err = 0;

	if (!ib_dev->modify_gid)
		return -ENOSYS;

	ib_dev->cache.roce_gid_cache =
		kcalloc(ib_dev->phys_port_cnt,
			sizeof(*ib_dev->cache.roce_gid_cache), GFP_KERNEL);

	if (!ib_dev->cache.roce_gid_cache) {
		pr_warn("failed to allocate roce addr cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		if (rdma_port_get_link_layer(ib_dev, port + start_port(ib_dev))
		    != IB_LINK_LAYER_ETHERNET)
			continue;
		ib_dev->cache.roce_gid_cache[port] =
			alloc_roce_gid_cache(ib_dev->gid_tbl_len[port]);
		if (!ib_dev->cache.roce_gid_cache[port]) {
			err = -ENOMEM;
			goto rollback_cache_setup;
		}
	}
	return 0;

rollback_cache_setup:
	for (port = 1; port <= ib_dev->phys_port_cnt; port++)
		free_roce_gid_cache(ib_dev, port);

	kfree(ib_dev->cache.roce_gid_cache);
	ib_dev->cache.roce_gid_cache = NULL;
	return err;
}
/*
 * roce_del_all_netdev_gids - wipe every cache entry owned by @ndev.
 *
 * Walks @port's RoCE GID cache under the per-cache mutex and zeroes
 * (via write_gid with zgid/zattr) each entry whose attribute netdev is
 * @ndev - used when a net device goes away.  Returns 0 on completion
 * or when there is no cache array at all; -ENOSYS when the port's
 * cache is missing or inactive.
 */
int roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
			     struct net_device *ndev)
{
	struct ib_roce_gid_cache *cache;
	int ix;

	if (!ib_dev->cache.roce_gid_cache)
		return 0;

	cache  = ib_dev->cache.roce_gid_cache[port - start_port(ib_dev)];

	if (!cache || !cache->active)
		return -ENOSYS;

	mutex_lock(&cache->lock);

	for (ix = 0; ix < cache->sz; ix++)
		if (cache->data_vec[ix].attr.ndev == ndev)
			write_gid(ib_dev, port, cache, ix, &zgid, &zattr);

	mutex_unlock(&cache->lock);
	return 0;
}