Example no. 1
0
static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
{
	struct device *dev = card->dev;
	struct snd_soc_dai_link *link;
	struct device_node *np, *codec, *cpu, *node  = dev->of_node;
	struct apq8016_sbc_data *data;
	int ret, num_links;

	ret = snd_soc_of_parse_card_name(card, "qcom,model");
	if (ret) {
		dev_err(dev, "Error parsing card name: %d\n", ret);
		return ERR_PTR(ret);
	}

	/* DAPM routes */
	if (of_property_read_bool(node, "qcom,audio-routing")) {
		ret = snd_soc_of_parse_audio_routing(card,
					"qcom,audio-routing");
		if (ret)
			return ERR_PTR(ret);
	}


	/* Populate links */
	num_links = of_get_child_count(node);

	/* Allocate the private data and the DAI link array */
	data = devm_kzalloc(dev, sizeof(*data) + sizeof(*link) * num_links,
			    GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	card->dai_link	= &data->dai_link[0];
	card->num_links	= num_links;

	link = data->dai_link;

	for_each_child_of_node(node, np) {
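		/* Each child node describes one DAI link via its "cpu" and "codec" subnodes. */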
		cpu = of_get_child_by_name(np, "cpu");
		codec = of_get_child_by_name(np, "codec");

		if (!cpu || !codec) {
			dev_err(dev, "Can't find cpu/codec DT node\n");
			return ERR_PTR(-EINVAL);
		}

		link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
		if (!link->cpu_of_node) {
			dev_err(card->dev, "error getting cpu phandle\n");
			return ERR_PTR(-EINVAL);
		}

		ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
		if (ret) {
			dev_err(card->dev, "error getting cpu dai name\n");
			return ERR_PTR(ret);
		}

		ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);

		if (ret < 0) {
			dev_err(card->dev, "error getting codec dai name\n");
			return ERR_PTR(ret);
		}

		link->platform_of_node = link->cpu_of_node;
		ret = of_property_read_string(np, "link-name", &link->name);
		if (ret) {
			dev_err(card->dev, "error getting codec dai_link name\n");
			return ERR_PTR(ret);
		}

		link->stream_name = link->name;
		link->init = apq8016_sbc_dai_init;
		link++;
	}

	return data;
}
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct qstr *name, struct page *dpage)
{
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
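		/* Brand-new inode: allocate its node page and set up dir, ACL and security metadata. */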
		page = new_inode_page(inode);
		if (IS_ERR(page))
			return page;

		if (S_ISDIR(inode->i_mode)) {
			err = make_empty_dir(inode, dir, page);
			if (err)
				goto error;
		}

		err = f2fs_init_acl(inode, dir, page, dpage);
		if (err)
			goto put_error;

		err = f2fs_init_security(inode, dir, name, page);
		if (err)
			goto put_error;
	} else {
		page = get_node_page(F2FS_I_SB(dir), inode->i_ino);
		if (IS_ERR(page))
			return page;

		set_cold_node(inode, page);
	}

	if (name)
		init_dent_inode(name, page);

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
		file_lost_pino(inode);
		/*
		 * If we link the tmpfile to an alias through the linkat path,
		 * we should remove this inode from the orphan list.
		 */
		if (inode->i_nlink == 0)
			remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
		inc_nlink(inode);
	}
	return page;

put_error:
	f2fs_put_page(page, 1);
error:
	/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_blocks(inode, 0, false);
	remove_dirty_dir_inode(inode);
	remove_inode_page(inode);
	return ERR_PTR(err);
}
Example no. 3
0
/* Common file handle decoding for both parent and dentry */
static struct dentry *
vnlayer_decode_fh(
    SUPER_T *sb,
    struct fid *fh,
    int len,                            /* counted in units of 4-bytes */
    int fhtype,
    int is_parent)
{
    MDKI_FID_T *lfidp;
    DENT_T *dp;
    int error, fidlen;
    SUPER_T *realsb;
    unsigned realsb_hash;

    fidlen = fhtype >> 1;
    if (fidlen == 0) {
        return ERR_PTR(-EINVAL);
    }

    if (len * 4 < MDKI_FID_LEN_WITH_HASH(fidlen)) {
        MDKI_VFS_LOG(VFS_LOG_ESTALE,
                      "%s: FH too small to be a MVFS FH\n",
                      __FUNCTION__);
        return ERR_PTR(-EINVAL);
    }

    lfidp = KMEM_ALLOC(MDKI_FID_ALLOC_LEN(fidlen), KM_SLEEP);
    if (lfidp == NULL) {
        return ERR_PTR(-ENOMEM);
    }

    if (is_parent) {
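        /* Unpack either the parent FID or the object FID, depending on which dentry was requested. */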
        error = vnlayer_unpack_fh((__u32 *)fh, len, fhtype, fidlen,
                                  NULL, lfidp);
    } else {
        error = vnlayer_unpack_fh((__u32 *)fh, len, fhtype, fidlen,
                                  lfidp, NULL);
    }

    if (error == 0) {
        realsb_hash = MDKI_FID_SB_HASH(fh, fidlen);

        /*
         * Search in the VOB mount list for the super_block we encoded.
         * If the result is not NULL, the superblock was locked with
         * lock_super and should be unlocked.
         */
        realsb = (SUPER_T *) mvfs_find_mount(vnlayer_eval_mount,
                                             &realsb_hash);

        if (realsb != NULL) {
            /*
             * We found a matching VOB mount for this hash; leave it to
             * vnlayer_get_dentry to decide whether we can trust this FID,
             * since it should be able to detect any staleness.
             */
            dp = vnlayer_get_dentry(realsb, lfidp);
            unlock_super(realsb);
            if (IS_ERR(dp)) {
                MDKI_VFS_LOG(VFS_LOG_ESTALE,
                    "%s: pid %d vnlayer_get_dentry returned error %ld\n",
                    __FUNCTION__, current->pid, PTR_ERR(dp));
            }
        } else {
            dp = ERR_PTR(-EINVAL);
            MDKI_VFS_LOG(VFS_LOG_ESTALE, "%s SB not found, hash=%08x\n",
                         __FUNCTION__, realsb_hash);
        }
    } else {
        dp = ERR_PTR(error);
    }
    KMEM_FREE(lfidp, MDKI_FID_ALLOC_LEN(fidlen));
    return dp;
}
struct q6v5_data __devinit *pil_q6v5_init(struct platform_device *pdev)
{
	struct q6v5_data *drv;
	struct resource *res;
	struct pil_desc *desc;
	int ret;

#ifdef CONFIG_MACH_LGE
	if (!strcmp(pdev->name, "mss"))
		dev_info(&pdev->dev, "pil_q6v5_init, %s \n", pdev->name);
#endif

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return ERR_PTR(-ENOMEM);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
	drv->reg_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!drv->reg_base)
		return ERR_PTR(-ENOMEM);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
	drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!drv->axi_halt_base)
		return ERR_PTR(-ENOMEM);

	desc = &drv->desc;
	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
				      &desc->name);
	if (ret)
		return ERR_PTR(ret);

	drv->xo = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(drv->xo))
		return ERR_CAST(drv->xo);

	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
	if (IS_ERR(drv->vreg_cx))
		return ERR_CAST(drv->vreg_cx);

	drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
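	/* vdd_pll is optional: when present, program its voltage and load; otherwise run without it. */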
	if (!IS_ERR(drv->vreg_pll)) {
		int voltage;
		ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
					   &voltage);
		if (ret) {
			dev_err(&pdev->dev, "Failed to find vdd_pll voltage.\n");
			return ERR_PTR(ret);
		}

		ret = regulator_set_voltage(drv->vreg_pll, voltage, voltage);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request vdd_pll voltage.\n");
			return ERR_PTR(ret);
		}

		ret = regulator_set_optimum_mode(drv->vreg_pll, 10000);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
			return ERR_PTR(ret);
		}
	} else {
		 drv->vreg_pll = NULL;
	}

	desc->dev = &pdev->dev;

	return drv;
}
Example no. 5
0
static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = XTS_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!w->nbytes)
		return err;

	s.t = (be128 *)w->iv;
	avail = w->nbytes;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);

	goto first;

	for (;;) {
		do {
			gf128mul_x_ble(s.t, s.t);

first:
			xts_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!w->nbytes)
			break;

		avail = w->nbytes;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	i = 0;
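	/* Enter the loop past the tweak update so block 0 uses the tweak value computed from the IV. */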
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key2,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(be128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);

static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		crypto_free_cipher(cipher);
		return -EINVAL;
	}

	ctx->child = cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher)) {
		crypto_free_cipher(ctx->child);
		return PTR_ERR(cipher);
	}

	/* this check isn't really needed, leave it here just in case */
	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
		crypto_free_cipher(cipher);
		crypto_free_cipher(ctx->child);
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	ctx->tweak = cipher;

	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);
	crypto_free_cipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("xts", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;

	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		2 * alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize =
		2 * alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
Example no. 6
0
char* talpa__d_path( struct dentry *dentry, struct vfsmount *vfsmnt, struct dentry *root, struct vfsmount *rootmnt, char *buffer, int buflen)
{
    char* path;

    /* Get the function pointer for the real __d_path if we're going to call it. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) || (defined TALPA_HAS_DPATH)

#   if defined TALPA_DPATH_SLES11
    typedef char *(*d_path_func)(const struct path *, struct path *, char *, int, int);
#   elif defined TALPA_DPATH_PATH
    typedef char *(*d_path_func)(const struct path *, struct path *, char *, int);
#   elif defined TALPA_DPATH_SUSE103
    typedef char *(*d_path_func)(struct dentry *, struct vfsmount *, struct dentry *, struct vfsmount *, char *buffer, int buflen, int flags);
#   else
    typedef char *(*d_path_func)(struct dentry *, struct vfsmount *, struct dentry *, struct vfsmount *, char *buffer, int buflen);
#   endif


#   if defined TALPA_HAS_DPATH_ADDR
    d_path_func kernel_d_path = (d_path_func)talpa_get_symbol("__d_path", (void *)TALPA_DPATH_ADDR);
#   else
    d_path_func kernel_d_path = &__d_path;
#   endif

#   if defined TALPA_DPATH_SLES11 || defined TALPA_DPATH_PATH
    struct path pathPath;
    struct path rootPath;
#   endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) || (defined TALPA_HAS_DPATH) */

#if defined HOLD_DCACHE_LOCK_WHILE_CALLING_D_PATH
    spin_lock(&dcache_lock);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) || (defined TALPA_HAS_DPATH)
    /* Calling the real __d_path */
#   if defined TALPA_DPATH_SLES11 || defined TALPA_DPATH_PATH
    pathPath.dentry = dentry;
    pathPath.mnt = vfsmnt;
    rootPath.dentry = root;
    rootPath.mnt = rootmnt;
#endif

#if defined TALPA_D_DNAME_DIRECT_DPATH
    if (dentry->d_op && dentry->d_op->d_dname)
    {
        path = d_path(&pathPath, buffer, buflen);
        if ( unlikely( IS_ERR(path) != 0 ) )
        {
            critical("talpa__d_path: d_path returned an error: %ld",PTR_ERR(path));
            path = NULL;
        }
        if ( NULL != path )
        {
            return path;
        }
    }
#endif /* TALPA_D_DNAME_DIRECT_DPATH */

#   if defined TALPA_DPATH_SLES11
    path = kernel_d_path(&pathPath, &rootPath, buffer, buflen, 0);
#   elif defined TALPA_DPATH_PATH
    path = kernel_d_path(&pathPath, &rootPath, buffer, buflen);
#   elif defined TALPA_DPATH_SUSE103
    path = kernel_d_path(dentry, vfsmnt, root, rootmnt, buffer, buflen, 0);
#   else
    path = kernel_d_path(dentry, vfsmnt, root, rootmnt, buffer, buflen);
#   endif
#else
    /* Call our own version */
    path = __talpa_d_path(dentry, vfsmnt, root, rootmnt, buffer, buflen);
#endif

#if defined HOLD_DCACHE_LOCK_WHILE_CALLING_D_PATH
    spin_unlock(&dcache_lock);
#endif

    if ( unlikely( IS_ERR(path) != 0 ) )
    {
        critical("talpa__d_path: kernel__d_path returned an error: %ld",PTR_ERR(path));
        path = NULL;
    }
    else if ( unlikely( NULL == path ) )
    {
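        /* kernel_d_path returned NULL: fall back to d_path where available and log what happened. */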
#ifdef TALPA_D_DNAME_DIRECT_DPATH
        /* only use this as a fall-back; it will only return the path relative to a chroot.
         * Use this in cases where kernel_d_path fails to return a valid path for bind mounts
         * on newer kernels in a systemd environment */
        path = d_path(&pathPath, buffer, buflen);
        if ( unlikely( IS_ERR(path) != 0 ) )
        {
            critical("talpa__d_path: kernel_d_path returned an error: %ld",PTR_ERR(path));
            path = NULL;
        }
        dbg("    dpath=%s",path);

        if (dentry->d_op && dentry->d_op->d_dname)
        {
            err("dpath=%s - dentry has d_op and d_dname=%p",path,dentry->d_op->d_dname);
        }
#endif
        if ( NULL == path )
        {
            if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
                dbg("talpa__d_path: kernel_d_path returned NULL for deleted file");
                dbg("    basename=%s",dentry->d_name.name);
            }
            else
            {
                info("talpa__d_path: kernel_d_path returned NULL for non-deleted file");
                info("    basename=%s",dentry->d_name.name);
            }
        }
        else
        {
            if (!IS_ROOT(dentry) && d_unhashed(dentry))
            {
                dbg("    talpa__d_path: kernel_d_path returned NULL but d_path returned path %s for deleted file",path);
            }
            else
            {
#ifdef TALPA_MNT_NAMESPACE
                if (NULL != getNamespaceInfo(vfsmnt) && (!S_ISDIR(dentry->d_inode->i_mode)))
                {
                    /* we're in a namespace/container, append '(namespace)' to the path */
                    int pathlen=strlen(path);
                    if (pathlen + 13 > buflen)
                    {
                        return ERR_PTR(-ENAMETOOLONG);
                    }
                    memmove(buffer, path, pathlen);
                    path = buffer;
                    memcpy(buffer + pathlen, " (namespace)", 13);
                }
#endif

                /* the systemd / containers / bind mount case. */
                dbg("    talpa__d_path: kernel_d_path returned NULL but d_path returned path %s for non-deleted file",path);
            }
        }
    }

    return path;
}
Example no. 7
0
/*
 * look up an entry in a directory
 */
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
				 struct nameidata *nd)
{
	struct afs_vnode *vnode;
	struct afs_fid fid;
	struct inode *inode;
	struct key *key;
	int ret;

	vnode = AFS_FS_I(dir);

	_enter("{%x:%u},%p{%s},",
	       vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name);

	ASSERTCMP(dentry->d_inode, ==, NULL);

	if (dentry->d_name.len >= AFSNAMEMAX) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return ERR_PTR(-ESTALE);
	}

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		return ERR_CAST(key);
	}

	ret = afs_validate(vnode, key);
	if (ret < 0) {
		key_put(key);
		_leave(" = %d [val]", ret);
		return ERR_PTR(ret);
	}

	ret = afs_do_lookup(dir, dentry, &fid, key);
	if (ret < 0) {
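		/* Lookup failed: check whether the name refers to an auto-mountpoint before propagating the error. */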
		inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid);
		if (!IS_ERR(inode)) {
			key_put(key);
			goto success;
		}

		ret = PTR_ERR(inode);
		key_put(key);
		if (ret == -ENOENT) {
			d_add(dentry, NULL);
			_leave(" = NULL [negative]");
			return NULL;
		}
		_leave(" = %d [do]", ret);
		return ERR_PTR(ret);
	}
	dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version;

	/* instantiate the dentry */
	inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL);
	key_put(key);
	if (IS_ERR(inode)) {
		_leave(" = %ld", PTR_ERR(inode));
		return ERR_CAST(inode);
	}

success:
	d_add(dentry, inode);
	_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
	       fid.vnode,
	       fid.unique,
	       dentry->d_inode->i_ino,
	       dentry->d_inode->i_generation);

	return NULL;
}
Example no. 8
0
struct ipanic_header *ipanic_header_from_sd(void)
{
	struct ipanic_data_header *dheader;
	int dt;
	char str[256];
	size_t size = 0;
	struct ipanic_header *header;
	struct ipanic_data_header dheader_header = {
		.type = IPANIC_DT_HEADER,
		.offset = 0,
		.used = sizeof(struct ipanic_header),
	};
	header = (struct ipanic_header *)ipanic_data_from_sd(&dheader_header, 0);
	if (IS_ERR_OR_NULL((void *)header)) {
		LOGD("read header failed[%ld]\n", PTR_ERR((void *)header));
		header = NULL;
	} else if (header->magic != AEE_IPANIC_MAGIC) {
		LOGD("no ipanic data[%x]\n", header->magic);
		header = NULL;
		ipanic_erase();
	} else {
		for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) {
			dheader = &header->data_hdr[dt];
			if (dheader->valid) {
				size += snprintf(str + size, 256 - size, "%s[%x@%x],",
						 dheader->name, dheader->used, dheader->offset);
			}
		}
		LOGD("ipanic data available^v^%s^v^\n", str);
	}
	return header;
}

struct aee_oops *ipanic_oops_from_sd(void)
{
	struct aee_oops *oops = NULL;
	struct ipanic_header *hdr = NULL;
	struct ipanic_data_header *dheader;
	char *data;
	int i;
	hdr = ipanic_header_from_sd();
	if (hdr == NULL) {
		return NULL;
	}

	oops = aee_oops_create(AE_DEFECT_FATAL, AE_KE, IPANIC_MODULE_TAG);
	if (oops == NULL) {
		LOGE("%s: can not allocate buffer\n", __func__);
		return NULL;
	}

	for (i = IPANIC_DT_HEADER + 1; i < IPANIC_DT_RESERVED31; i++) {
		dheader = &hdr->data_hdr[i];
		if (dheader->valid == 0) {
			continue;
		}
		data = ipanic_data_from_sd(dheader, 1);
		if (data) {
			switch (i) {
			case IPANIC_DT_KERNEL_LOG:
				oops->console = data;
				oops->console_len = dheader->used;
				break;
			case IPANIC_DT_MINI_RDUMP:
				oops->mini_rdump = data;
				oops->mini_rdump_len = dheader->used;
				break;
			case IPANIC_DT_MAIN_LOG:
				oops->android_main = data;
				oops->android_main_len = dheader->used;
				break;
			case IPANIC_DT_SYSTEM_LOG:
				oops->android_system = data;
				oops->android_system_len = dheader->used;
				break;
			case IPANIC_DT_EVENTS_LOG:
				/* Todo .. */
				break;
			case IPANIC_DT_RADIO_LOG:
				oops->android_radio = data;
				oops->android_radio_len = dheader->used;
				break;
			case IPANIC_DT_CURRENT_TSK:
				memcpy(oops->process_path, data, sizeof(struct aee_process_info));
				break;
			case IPANIC_DT_MMPROFILE:
				oops->mmprofile = data;
				oops->mmprofile_len = dheader->used;
				break;
			default:
				LOGI("%s: [%d] NOT USED.\n", __func__, i);
			}
		} else {
			LOGW("%s: read %s failed, %x@%x\n", __func__,
			     dheader->name, dheader->used, dheader->offset);
		}
	}
	return oops;
}

int ipanic(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct ipanic_data_header *dheader;
	struct kmsg_dumper dumper;
	ipanic_atf_log_rec_t atf_log = {ATF_LOG_SIZE, 0, 0};
	int dt;
	int errno;
	struct ipanic_header *ipanic_hdr = ipanic_header();

#if 0
		return NOTIFY_DONE;
#endif

	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_START);
	bust_spinlocks(1);
	spin_lock_irq(&ipanic_lock);
	aee_disable_api();
	if (!ipanic_data_is_valid(IPANIC_DT_KERNEL_LOG)) {
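		/* Save the kernel log first if it has not already been written out. */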
		ipanic_klog_region(&dumper);
		errno = ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper);
		if (errno == -1)
			aee_nested_printf("$");
	}
	ipanic_klog_region(&dumper);
	errno = ipanic_data_to_sd(IPANIC_DT_OOPS_LOG, &dumper);
	if (errno == -1)
		aee_nested_printf("$");
	ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0);
	/* kick wdt after save the most critical infos */
	ipanic_kick_wdt();
	ipanic_data_to_sd(IPANIC_DT_MAIN_LOG, (void *)1);
	ipanic_data_to_sd(IPANIC_DT_SYSTEM_LOG, (void *)4);
	ipanic_data_to_sd(IPANIC_DT_EVENTS_LOG, (void *)2);
	ipanic_data_to_sd(IPANIC_DT_RADIO_LOG, (void *)3);
	aee_wdt_dump_info();
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_WDT_LOG, &dumper);
#ifdef CONFIG_MTK_WQ_DEBUG
	mt_dump_wq_debugger();
#endif
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_WQ_LOG, &dumper);
	ipanic_data_to_sd(IPANIC_DT_MMPROFILE, 0);
	ipanic_data_to_sd(IPANIC_DT_ATF_LOG, &atf_log);
	errno = ipanic_header_to_sd(0);
	if (!IS_ERR(ERR_PTR(errno)))
		mrdump_mini_ipanic_done();
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_LAST_LOG, &dumper);
	LOGD("ipanic done^_^");
	for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) {
		dheader = &ipanic_hdr->data_hdr[dt];
		if (dheader->valid) {
			LOGD("%s[%x@%x],", dheader->name, dheader->used, dheader->offset);
		}
	}
	LOGD("^_^\n");
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_DONE);

	return NOTIFY_DONE;
}

void ipanic_recursive_ke(struct pt_regs *regs, struct pt_regs *excp_regs, int cpu)
{
	int errno;
	struct kmsg_dumper dumper;
	aee_nested_printf("minidump\n");
	bust_spinlocks(1);
	flush_cache_all();
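	/* Disable the caches (architecture-specific) before capturing registers and the memory dump. */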
#ifdef __aarch64__
	cpu_cache_off();
#else
	cpu_proc_fin();
#endif
	mrdump_mini_ke_cpu_regs(excp_regs);
	mrdump_mini_per_cpu_regs(cpu, regs);
	flush_cache_all();
	ipanic_mrdump_mini(AEE_REBOOT_MODE_NESTED_EXCEPTION, "Nested Panic");

	ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0);
	ipanic_kick_wdt();
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper);
	errno = ipanic_header_to_sd(0);
	if (!IS_ERR(ERR_PTR(errno)))
		mrdump_mini_ipanic_done();
	if (ipanic_dt_active(IPANIC_DT_RAM_DUMP)) {
		aee_nested_printf("RAMDUMP.\n");
		__mrdump_create_oops_dump(AEE_REBOOT_MODE_NESTED_EXCEPTION, excp_regs,
					  "Nested Panic");
	}
	bust_spinlocks(0);
}
Example no. 9
0
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
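		/* Only regular files, directories and symlinks get a block mapping (bmap). */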
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
Example no. 10
0
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.len = 2, .name = ".."};
	unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
					struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page, nd ? nd->flags : 0);
	if (de) {
		nid_t ino = le32_to_cpu(de->ino);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);

		inode = f2fs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}

	return d_splice_alias(inode, dentry);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page, 0);
	if (!de)
		goto fail;

	f2fs_lock_op(sbi);
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, dir, inode);
	f2fs_unlock_op(sbi);

	/* In order to evict this inode, we set it dirty */
	mark_inode_dirty(inode);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	size_t symlen = strlen(symname) + 1;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	err = page_symlink(inode, symname, symlen);
	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return err;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	stat_inc_inline_dir(inode);
	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				umode_t mode, dev_t rdev)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page, 0);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	if (new_inode) {
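		/* The target name exists: redirect its entry to old_inode and drop the old target's link count. */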

		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page, 0);
		if (!new_entry)
			goto out_dir;

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, &new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		up_write(&F2FS_I(new_inode)->i_sem);

		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			update_inode_page(old_inode);
		} else {
			f2fs_dentry_kunmap(old_inode, old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = page_follow_link_light,
	.put_link       = page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr        = f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr       = generic_setxattr,
	.getxattr       = generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr    = generic_removexattr,
#endif
};
Example no. 11
0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
void *
#else
int
#endif
vnode_shadow_iop_follow_link(
    DENT_T *dentry,            /* entry we are trying to resolve */
    struct nameidata *nd       /* Contains parent dentry */
)
{
    int err = 0;
    int len = PATH_MAX;
    char *buff;
    mm_segment_t old_fs;        /* Because we provide a kernel buffer. */
    INODE_T *real_inode;
    DENT_T *real_dentry;
    VNODE_T *cvp;

    /* this function must consume a reference on base */
    /* We only path_release on error. */

    err = 0;

    real_dentry = REALDENTRY_LOCKED(dentry, &cvp);
    if (real_dentry == NULL) {
	err = -ENOENT;
        MDKI_PATH_RELEASE(nd);
        goto out_nolock;
    }
    VNODE_DGET(real_dentry);             /* protect inode */
    if (real_dentry->d_inode == NULL) {
        /* delete race */
	err = -ENOENT;
        MDKI_PATH_RELEASE(nd);
        goto out;
    }
    real_inode = real_dentry->d_inode;
    /* If there are no underlying symlink functions, we are done */
    if (real_inode->i_op && real_inode->i_op->readlink &&
        real_inode->i_op->follow_link)
    {
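        /* The real inode is a symlink: read its target into a kernel buffer and hand it to the VFS. */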
        buff = KMEM_ALLOC(len, KM_SLEEP);
        if (!buff) {
            MDKI_PATH_RELEASE(nd);
            err = -ENOMEM;
            goto out;
        }
        /* We're providing a kernel buffer to copy into, so let everyone know. */
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        err = vnode_shadow_iop_readlink(dentry, buff, len);
        set_fs(old_fs);
        if (err < 0) {
            KMEM_FREE(buff, len);
            MDKI_PATH_RELEASE(nd);
            goto out;
        }
        /* done with dentry */
        /* Make sure string is null terminated */
        buff[err] = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
        err = vfs_follow_link(nd, buff);
        KMEM_FREE(buff,len);
#else
        VNODE_DPUT(real_dentry);
        REALDENTRY_UNLOCK(dentry, cvp);
        nd_set_link(nd, buff);
        return(buff); /* vnop_iop_put_link() will free this buf. */
#endif
    }
out:
    VNODE_DPUT(real_dentry);
    REALDENTRY_UNLOCK(dentry, cvp);
out_nolock:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
    return ERR_PTR(err);
#else
    return(err);
#endif
}
Example no. 12
0
static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
					struct file *f,
					int (*open)(struct inode *, struct file *),
					const struct cred *cred)
{
	static const struct file_operations empty_fops = {};
	struct inode *inode;
	int error;

	f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
				FMODE_PREAD | FMODE_PWRITE;

	if (unlikely(f->f_flags & O_PATH))
		f->f_mode = FMODE_PATH;

	inode = dentry->d_inode;
	if (f->f_mode & FMODE_WRITE) {
		error = __get_file_write_access(inode, mnt);
		if (error)
			goto cleanup_file;
		if (!special_file(inode->i_mode))
			file_take_write(f);
	}

	f->f_mapping = inode->i_mapping;
	f->f_path.dentry = dentry;
	f->f_path.mnt = mnt;
	f->f_pos = 0;
	file_sb_list_add(f, inode->i_sb);

	if (unlikely(f->f_mode & FMODE_PATH)) {
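		/* O_PATH opens get an empty file_operations and skip the rest of the setup. */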
		f->f_op = &empty_fops;
		return f;
	}

	f->f_op = fops_get(inode->i_fop);

	error = security_dentry_open(f, cred);
	if (error)
		goto cleanup_all;

	if (!open && f->f_op)
		open = f->f_op->open;
	if (open) {
		error = open(inode, f);
		if (error)
			goto cleanup_all;
	}
	if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(inode);

	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);

	/* NB: we're sure to have correct a_ops only after f_op->open */
	if (f->f_flags & O_DIRECT) {
		if (!f->f_mapping->a_ops ||
		    ((!f->f_mapping->a_ops->direct_IO) &&
		    (!f->f_mapping->a_ops->get_xip_mem))) {
			fput(f);
			f = ERR_PTR(-EINVAL);
		}
	}

	return f;

cleanup_all:
	fops_put(f->f_op);
	if (f->f_mode & FMODE_WRITE) {
		put_write_access(inode);
		if (!special_file(inode->i_mode)) {
			/*
			 * We don't consider this a real
			 * mnt_want/drop_write() pair
			 * because it all happened right
			 * here, so just reset the state.
			 */
			file_reset_write(f);
			mnt_drop_write(mnt);
		}
	}
	file_sb_list_del(f);
	f->f_path.dentry = NULL;
	f->f_path.mnt = NULL;
cleanup_file:
	put_filp(f);
	dput(dentry);
	mntput(mnt);
	return ERR_PTR(error);
}
Example no. 13
0
/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @dev_priv: the owner of the context
 * @flags: flags for the context (passed from user space)
 *
 * Create and return a new draw context for the 3D core.
 */
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
			uint32_t *flags)
{
	struct adreno_context *drawctxt;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;
	unsigned long local;

	local = *flags & (KGSL_CONTEXT_PREAMBLE |
		KGSL_CONTEXT_NO_GMEM_ALLOC |
		KGSL_CONTEXT_PER_CONTEXT_TS |
		KGSL_CONTEXT_USER_GENERATED_TS |
		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
		KGSL_CONTEXT_CTX_SWITCH |
		KGSL_CONTEXT_PRIORITY_MASK |
		KGSL_CONTEXT_TYPE_MASK |
		KGSL_CONTEXT_PWR_CONSTRAINT |
		KGSL_CONTEXT_IFH_NOP |
		KGSL_CONTEXT_SECURE);

	/* Check for errors before trying to initialize */

	/* We no longer support legacy context switching */
	if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
		(local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
		KGSL_DEV_ERR_ONCE(device,
			"legacy context switch not supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Make sure that our target can support secure contexts if requested */
	if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
			(local & KGSL_CONTEXT_SECURE)) {
		KGSL_DEV_ERR_ONCE(device, "Secure context not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);

	if (drawctxt == NULL)
		return ERR_PTR(-ENOMEM);

	ret = kgsl_context_init(dev_priv, &drawctxt->base);
	if (ret != 0) {
		kfree(drawctxt);
		return ERR_PTR(ret);
	}

	drawctxt->timestamp = 0;

	drawctxt->base.flags = local;

	/* Always enable per-context timestamps */
	drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
	drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
		>> KGSL_CONTEXT_TYPE_SHIFT;
	spin_lock_init(&drawctxt->lock);
	init_waitqueue_head(&drawctxt->wq);
	init_waitqueue_head(&drawctxt->waiting);

	/* Set the context priority */
	_set_context_priority(drawctxt);
	/* set the context ringbuffer */
	drawctxt->rb = adreno_ctx_get_rb(adreno_dev, drawctxt);

	/*
	 * Set up the plist node for the dispatcher.  Insert the node into the
	 * drawctxt pending list based on priority.
	 */
	plist_node_init(&drawctxt->pending, drawctxt->base.priority);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
			0);
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
			0);

	adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);

	/* copy back whatever flags we decided were valid */
	*flags = drawctxt->base.flags;
	return &drawctxt->base;
}
Example no. 14
0
static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
{
	struct hfs_btree *tree;
	struct hfs_bnode *node, *new_node, *next_node;
	struct hfs_bnode_desc node_desc;
	int num_recs, new_rec_off, new_off, old_rec_off;
	int data_start, data_end, size;

	tree = fd->tree;
	node = fd->bnode;
	new_node = hfs_bmap_alloc(tree);
	if (IS_ERR(new_node))
		return new_node;
	hfs_bnode_get(node);
	hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
		node->this, new_node->this, node->next);
	new_node->next = node->next;
	new_node->prev = node->this;
	new_node->parent = node->parent;
	new_node->type = node->type;
	new_node->height = node->height;

	if (node->next)
		next_node = hfs_bnode_find(tree, node->next);
	else
		next_node = NULL;

	if (IS_ERR(next_node)) {
		hfs_bnode_put(node);
		hfs_bnode_put(new_node);
		return next_node;
	}

	size = tree->node_size / 2 - node->num_recs * 2 - 14;
	old_rec_off = tree->node_size - 4;
	num_recs = 1;
	for (;;) {
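		/* Find the split point: count how many records keep the old node roughly half full. */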
		data_start = hfs_bnode_read_u16(node, old_rec_off);
		if (data_start > size)
			break;
		old_rec_off -= 2;
		if (++num_recs < node->num_recs)
			continue;
		/* panic? */
		hfs_bnode_put(node);
		hfs_bnode_put(new_node);
		if (next_node)
			hfs_bnode_put(next_node);
		return ERR_PTR(-ENOSPC);
	}

	if (fd->record + 1 < num_recs) {
		/* new record is in the lower half,
		 * so leave some more space there
		 */
		old_rec_off += 2;
		num_recs--;
		data_start = hfs_bnode_read_u16(node, old_rec_off);
	} else {
		hfs_bnode_put(node);
		hfs_bnode_get(new_node);
		fd->bnode = new_node;
		fd->record -= num_recs;
		fd->keyoffset -= data_start - 14;
		fd->entryoffset -= data_start - 14;
	}
	new_node->num_recs = node->num_recs - num_recs;
	node->num_recs = num_recs;

	new_rec_off = tree->node_size - 2;
	new_off = 14;
	size = data_start - new_off;
	num_recs = new_node->num_recs;
	data_end = data_start;
	while (num_recs) {
		hfs_bnode_write_u16(new_node, new_rec_off, new_off);
		old_rec_off -= 2;
		new_rec_off -= 2;
		data_end = hfs_bnode_read_u16(node, old_rec_off);
		new_off = data_end - size;
		num_recs--;
	}
	hfs_bnode_write_u16(new_node, new_rec_off, new_off);
	hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);

	/* update new bnode header */
	node_desc.next = cpu_to_be32(new_node->next);
	node_desc.prev = cpu_to_be32(new_node->prev);
	node_desc.type = new_node->type;
	node_desc.height = new_node->height;
	node_desc.num_recs = cpu_to_be16(new_node->num_recs);
	node_desc.reserved = 0;
	hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));

	/* update previous bnode header */
	node->next = new_node->this;
	hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
	node_desc.next = cpu_to_be32(node->next);
	node_desc.num_recs = cpu_to_be16(node->num_recs);
	hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));

	/* update next bnode header */
	if (next_node) {
		next_node->prev = new_node->this;
		hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
		node_desc.prev = cpu_to_be32(next_node->prev);
		hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc));
		hfs_bnode_put(next_node);
	} else if (node->this == tree->leaf_tail) {
		/* if there is no next node, this might be the new tail */
		tree->leaf_tail = new_node->this;
		mark_inode_dirty(tree->inode);
	}

	hfs_bnode_dump(node);
	hfs_bnode_dump(new_node);
	hfs_bnode_put(node);

	return new_node;
}
Example no. 15
0
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags).  The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
			         unsigned long stack_start,
			         struct pt_regs *regs,
			         unsigned long stack_size,
			         int *parent_tidptr,
			         int *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);
	if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);
	if (!(clone_flags & CLONE_DETACHED) && (clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	p->tux_info = NULL;

	retval = -EAGAIN;

	/*
	 * Increment user->__count before the rlimit test so that it would
	 * be correct if we take the bad_fork_free failure path.
	 */
	atomic_inc(&p->user->__count);
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
			goto bad_fork_free;
	}

	atomic_inc(&p->user->processes);

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;
	
	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
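	/* The idle task keeps pid 0; everyone else gets a fresh pid from the pidmap. */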
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	INIT_LIST_HEAD(&p->run_list);

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->switch_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	memset(&p->utime, 0, sizeof(p->utime));
	memset(&p->stime, 0, sizeof(p->stime));
	memset(&p->cutime, 0, sizeof(p->cutime));
	memset(&p->cstime, 0, sizeof(p->cstime));
	memset(&p->group_utime, 0, sizeof(p->group_utime));
	memset(&p->group_stime, 0, sizeof(p->group_stime));
	memset(&p->group_cutime, 0, sizeof(p->group_cutime));
	memset(&p->group_cstime, 0, sizeof(p->group_cstime));

#ifdef CONFIG_SMP
	memset(&p->per_cpu_utime, 0, sizeof(p->per_cpu_utime));
	memset(&p->per_cpu_stime, 0, sizeof(p->per_cpu_stime));
#endif

	memset(&p->timing_state, 0, sizeof(p->timing_state));
	p->timing_state.type = PROCESS_TIMING_USER;
	p->last_sigxcpu = 0;
	p->array = NULL;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;
	if (copy_signal(clone_flags, p))
		goto bad_fork_cleanup_sighand;
	if (copy_mm(clone_flags, p))
		goto bad_fork_cleanup_signal;
	if (copy_namespace(clone_flags, p))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;
	p->semundo = NULL;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID)
		? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID)
		? child_tidptr : NULL;

	/* Our parent execution domain becomes the current domain.
	   These must match for thread signalling to apply. */

	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->swappable = 1;
	if (clone_flags & CLONE_DETACHED)
		p->exit_signal = -1;
	else
		p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	local_irq_disable();
	p->time_slice = (current->time_slice + 1) >> 1;
	p->first_time_slice = 1;
	/*
	 * The remainder of the first timeslice might be recovered by
	 * the parent if the child exits early enough.
	 */
	current->time_slice >>= 1;
	p->last_run = jiffies;
	if (!current->time_slice) {
		/*
		 * This case is rare, it happens when the parent has only
		 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		scheduler_tick(0 /* don't update the time stats */);
	}
	local_irq_enable();

	if ((int)current->time_slice <= 0)
		BUG();
	if ((int)p->time_slice <= 0)
		BUG();

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EINTR;
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			p->sigpending = 1;
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, p->pgrp);
		attach_pid(p, PIDTYPE_SID, p->session);
	} else {
		link_pid(p, p->pids + PIDTYPE_TGID,
			&p->group_leader->pids[PIDTYPE_TGID].pid);
	}

	/* clear controlling tty of new task if parent's was just cleared */
	if (!current->tty && p->tty)
		p->tty = NULL;

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
	if (p->active_mm)
		mmdrop(p->active_mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	put_exec_domain(p->exec_domain);
	if (p->binfmt && p->binfmt->module)
		__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
bad_fork_free:
	p->state = TASK_ZOMBIE; /* debug */
	atomic_dec(&p->usage);
	put_task_struct(p);
	goto fork_out;
}
Example no. 16
0
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}
Esempio n. 17
0
struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
    const unsigned char *name = dentry->d_name.name;
    unsigned len = dentry->d_name.len;
    struct quad_buffer_head qbh;
    struct hpfs_dirent *de;
    ino_t ino;
    int err;
    struct inode *result = NULL;
    struct hpfs_inode_info *hpfs_result;

    hpfs_lock(dir->i_sb);
    if ((err = hpfs_chk_name(name, &len))) {
        if (err == -ENAMETOOLONG) {
            hpfs_unlock(dir->i_sb);
            return ERR_PTR(-ENAMETOOLONG);
        }
        goto end_add;
    }

    /*
     * '.' and '..' will never be passed here.
     */

    de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh);

    /*
     * This is not really a bailout, just means file not found.
     */

    if (!de) goto end;

    /*
     * Get inode number, what we're after.
     */

    ino = le32_to_cpu(de->fnode);

    /*
     * Go find or make an inode.
     */

    result = iget_locked(dir->i_sb, ino);
    if (!result) {
        hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode");
        goto bail1;
    }
    if (result->i_state & I_NEW) {
        hpfs_init_inode(result);
        if (de->directory)
            hpfs_read_inode(result);
        else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas)
            hpfs_read_inode(result);
        else {
            result->i_mode |= S_IFREG;
            result->i_mode &= ~0111;
            result->i_op = &hpfs_file_iops;
            result->i_fop = &hpfs_file_ops;
            set_nlink(result, 1);
        }
        unlock_new_inode(result);
    }
    hpfs_result = hpfs_i(result);
    if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;

    if ((de->has_acl || de->has_xtd_perm) && !(dir->i_sb->s_flags & MS_RDONLY)) {
        hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
        goto bail1;
    }

    /*
     * Fill in the info from the directory if this is a newly created
     * inode.
     */

    if (!result->i_ctime.tv_sec) {
        if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date))))
            result->i_ctime.tv_sec = 1;
        result->i_ctime.tv_nsec = 0;
        result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
        result->i_mtime.tv_nsec = 0;
        result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
        result->i_atime.tv_nsec = 0;
        hpfs_result->i_ea_size = le32_to_cpu(de->ea_size);
        if (!hpfs_result->i_ea_mode && de->read_only)
            result->i_mode &= ~0222;
        if (!de->directory) {
            if (result->i_size == -1) {
                result->i_size = le32_to_cpu(de->file_size);
                result->i_data.a_ops = &hpfs_aops;
                hpfs_i(result)->mmu_private = result->i_size;
                /*
                 * i_blocks should count the fnode and any anodes.
                 * We count 1 for the fnode and don't bother about
                 * anodes -- the disk heads are on the directory band
                 * and we want them to stay there.
                 */
                result->i_blocks = 1 + ((result->i_size + 511) >> 9);
            }
        }
    }
Esempio n. 18
0
static int ll_get_name(struct dentry *dentry, char *name,
		       struct dentry *child)
{
	struct inode *dir = d_inode(dentry);
	int rc;
	struct ll_getname_data lgd = {
		.lgd_name = name,
		.lgd_fid = ll_i2info(d_inode(child))->lli_fid,
		.ctx.actor = ll_nfs_get_name_filldir,
	};

	if (!dir || !S_ISDIR(dir->i_mode)) {
		rc = -ENOTDIR;
		goto out;
	}

	if (!dir->i_fop) {
		rc = -EINVAL;
		goto out;
	}

	mutex_lock(&dir->i_mutex);
	rc = ll_dir_read(dir, &lgd.ctx);
	mutex_unlock(&dir->i_mutex);
	if (!rc && !lgd.lgd_found)
		rc = -ENOENT;
out:
	return rc;
}

static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid,
				      int fh_len, int fh_type)
{
	struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;

	if (fh_type != LUSTRE_NFS_FID)
		return ERR_PTR(-EPROTO);

	return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
}

static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
				      int fh_len, int fh_type)
{
	struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;

	if (fh_type != LUSTRE_NFS_FID)
		return ERR_PTR(-EPROTO);

	return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
}

static struct dentry *ll_get_parent(struct dentry *dchild)
{
	struct ptlrpc_request *req = NULL;
	struct inode	  *dir = d_inode(dchild);
	struct ll_sb_info     *sbi;
	struct dentry	 *result = NULL;
	struct mdt_body       *body;
	static char	   dotdot[] = "..";
	struct md_op_data     *op_data;
	int		   rc;
	int		      lmmsize;

	LASSERT(dir && S_ISDIR(dir->i_mode));

	sbi = ll_s2sbi(dir->i_sb);

	CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n",
			dir->i_ino, PFID(ll_inode2fid(dir)));

	rc = ll_get_default_mdsize(sbi, &lmmsize);
	if (rc != 0)
		return ERR_PTR(rc);

	op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot,
				     strlen(dotdot), lmmsize,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return (void *)op_data;

	rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
	ll_finish_md_op_data(op_data);
	if (rc) {
		CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
		return ERR_PTR(rc);
	}
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	LASSERT(body->valid & OBD_MD_FLID);

	CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n",
		PFID(ll_inode2fid(dir)), PFID(&body->fid1));

	result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);

	ptlrpc_req_finished(req);
	return result;
}

struct export_operations lustre_export_operations = {
	.get_parent   = ll_get_parent,
	.encode_fh    = ll_encode_fh,
	.get_name     = ll_get_name,
	.fh_to_dentry = ll_fh_to_dentry,
	.fh_to_parent = ll_fh_to_parent,
};
Esempio n. 19
0
/**
 * d_path - return the path of a dentry
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @root: root dentry
 * @rootmnt: vfsmnt to which the root dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 */
static char * __talpa_d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
            struct dentry *root, struct vfsmount *rootmnt,
            char *buffer, int buflen)
{
    char * end = buffer+buflen;
    char * retval;
    int namelen;
    unsigned m_seq = 1;

    *--end = '\0';
    buflen--;
    if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
        buflen -= 10;
        end -= 10;
        if (buflen < 0)
            goto Elong;
        memcpy(end, " (deleted)", 10);
    }

    if (buflen < 1)
        goto Elong;
    /* Get '/' right */
    retval = end-1;
    *retval = '/';

    for (;;) {
        struct dentry * parent;

        if (dentry == root && vfsmnt == rootmnt)
            break;
        if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
            /* Global root? */
            talpa_vfsmount_lock(&m_seq);
            if (vfsmnt->mnt_parent == vfsmnt) {
                talpa_vfsmount_unlock(&m_seq);
                goto global_root;
            }
            dentry = vfsmnt->mnt_mountpoint;
            vfsmnt = vfsmnt->mnt_parent;
            talpa_vfsmount_unlock(&m_seq);
            continue;
        }
        parent = dentry->d_parent;
        prefetch(parent);
        namelen = dentry->d_name.len;
        buflen -= namelen + 1;
        if (buflen < 0)
            goto Elong;
        end -= namelen;
        memcpy(end, dentry->d_name.name, namelen);
        *--end = '/';
        retval = end;
        dentry = parent;
    }

    return retval;

global_root:
    namelen = dentry->d_name.len;
    buflen -= namelen;
    if (buflen < 0)
        goto Elong;
    retval -= namelen-1;    /* hit the slash */
    memcpy(retval, dentry->d_name.name, namelen);
    return retval;
Elong:
    return ERR_PTR(-ENAMETOOLONG);
}
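
A minimal usage sketch for the helper above, assuming the caller has already looked up and pinned dentry, vfsmnt and the root pair. The wrapper name talpa_example_path, the page-sized buffer and the kstrdup() copy-out are illustrative only; the explicit dcache_lock acquisition is what the comment above asks for and applies only to kernels old enough to still have that lock.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static char *talpa_example_path(struct dentry *dentry, struct vfsmount *mnt,
                                struct dentry *root, struct vfsmount *rootmnt)
{
    char *page_buf, *p, *out;

    page_buf = (char *)__get_free_page(GFP_KERNEL);
    if (!page_buf)
        return ERR_PTR(-ENOMEM);

    spin_lock(&dcache_lock);    /* required by the comment above (pre-2.6.38) */
    p = __talpa_d_path(dentry, mnt, root, rootmnt, page_buf, PAGE_SIZE);
    spin_unlock(&dcache_lock);

    if (IS_ERR(p)) {
        out = p;                          /* e.g. -ENAMETOOLONG */
    } else {
        out = kstrdup(p, GFP_KERNEL);     /* copy out before the page is freed */
        if (!out)
            out = ERR_PTR(-ENOMEM);
    }
    free_page((unsigned long)page_buf);
    return out;
}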
Esempio n. 20
0
static int setup_gpio(struct pi433_device *device)
{
	char	name[5];
	int	retval;
	int	i;
	const irq_handler_t DIO_irq_handler[NUM_DIO] = {
		DIO0_irq_handler,
		DIO1_irq_handler
	};

	for (i = 0; i < NUM_DIO; i++) {
		/* "construct" name and get the gpio descriptor */
		snprintf(name, sizeof(name), "DIO%d", i);
		device->gpiod[i] = gpiod_get(&device->spi->dev, name,
					     0 /*GPIOD_IN*/);

		if (device->gpiod[i] == ERR_PTR(-ENOENT)) {
			dev_dbg(&device->spi->dev,
				"Could not find entry for %s. Ignoring.", name);
			continue;
		}

		if (device->gpiod[i] == ERR_PTR(-EBUSY))
			dev_dbg(&device->spi->dev, "%s is busy.", name);

		if (IS_ERR(device->gpiod[i])) {
			retval = PTR_ERR(device->gpiod[i]);
			/* release already allocated gpios */
			for (i--; i >= 0; i--) {
				free_irq(device->irq_num[i], device);
				gpiod_put(device->gpiod[i]);
			}
			return retval;
		}

		/* configure the pin */
		gpiod_unexport(device->gpiod[i]);
		retval = gpiod_direction_input(device->gpiod[i]);
		if (retval)
			return retval;

		/* configure irq */
		device->irq_num[i] = gpiod_to_irq(device->gpiod[i]);
		if (device->irq_num[i] < 0) {
			device->gpiod[i] = ERR_PTR(-EINVAL);
			return device->irq_num[i];
		}
		retval = request_irq(device->irq_num[i],
				     DIO_irq_handler[i],
				     0, /* flags */
				     name,
				     device);

		if (retval)
			return retval;

		dev_dbg(&device->spi->dev, "%s successfully configured", name);
	}

	return 0;
}
Esempio n. 21
0
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}
Esempio n. 22
0
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
			    const struct ad7879_bus_ops *bops)
{
	struct ad7879_platform_data *pdata = dev->platform_data;
	struct ad7879 *ts;
	struct input_dev *input_dev;
	int err;
	u16 revid;

	if (!irq) {
		dev_err(dev, "no IRQ?\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!pdata) {
		dev_err(dev, "no platform data?\n");
		err = -EINVAL;
		goto err_out;
	}

	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!ts || !input_dev) {
		err = -ENOMEM;
		goto err_free_mem;
	}

	ts->bops = bops;
	ts->dev = dev;
	ts->input = input_dev;
	ts->irq = irq;

	setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);

	ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
	ts->pressure_max = pdata->pressure_max ? : ~0;

	ts->first_conversion_delay = pdata->first_conversion_delay;
	ts->acquisition_time = pdata->acquisition_time;
	ts->averaging = pdata->averaging;
	ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
	ts->median = pdata->median;

	snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));

	input_dev->name = "AD7879 Touchscreen";
	input_dev->phys = ts->phys;
	input_dev->dev.parent = dev;
	input_dev->id.bustype = bops->bustype;

	input_dev->open = ad7879_open;
	input_dev->close = ad7879_close;

	input_set_drvdata(input_dev, ts);

	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(ABS_X, input_dev->absbit);
	__set_bit(ABS_Y, input_dev->absbit);
	__set_bit(ABS_PRESSURE, input_dev->absbit);

	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(BTN_TOUCH, input_dev->keybit);

	input_set_abs_params(input_dev, ABS_X,
			pdata->x_min ? : 0,
			pdata->x_max ? : MAX_12BIT,
			0, 0);
	input_set_abs_params(input_dev, ABS_Y,
			pdata->y_min ? : 0,
			pdata->y_max ? : MAX_12BIT,
			0, 0);
	input_set_abs_params(input_dev, ABS_PRESSURE,
			pdata->pressure_min, pdata->pressure_max, 0, 0);

	err = ad7879_write(ts, AD7879_REG_CTRL2, AD7879_RESET);
	if (err < 0) {
		dev_err(dev, "Failed to write %s\n", input_dev->name);
		goto err_free_mem;
	}

	revid = ad7879_read(ts, AD7879_REG_REVID);
	input_dev->id.product = (revid & 0xff);
	input_dev->id.version = revid >> 8;
	if (input_dev->id.product != devid) {
		dev_err(dev, "Failed to probe %s (%x vs %x)\n",
			input_dev->name, devid, revid);
		err = -ENODEV;
		goto err_free_mem;
	}

	ts->cmd_crtl3 = AD7879_YPLUS_BIT |
			AD7879_XPLUS_BIT |
			AD7879_Z2_BIT |
			AD7879_Z1_BIT |
			AD7879_TEMPMASK_BIT |
			AD7879_AUXVBATMASK_BIT |
			AD7879_GPIOALERTMASK_BIT;

	ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
			AD7879_AVG(ts->averaging) |
			AD7879_MFS(ts->median) |
			AD7879_FCD(ts->first_conversion_delay);

	ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
			AD7879_ACQ(ts->acquisition_time) |
			AD7879_TMR(ts->pen_down_acc_interval);

	err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
				   IRQF_TRIGGER_FALLING,
				   dev_name(dev), ts);
	if (err) {
		dev_err(dev, "irq %d busy?\n", ts->irq);
		goto err_free_mem;
	}

	__ad7879_disable(ts);

	err = sysfs_create_group(&dev->kobj, &ad7879_attr_group);
	if (err)
		goto err_free_irq;

	err = ad7879_gpio_add(ts, pdata);
	if (err)
		goto err_remove_attr;

	err = input_register_device(input_dev);
	if (err)
		goto err_remove_gpio;

	return ts;

err_remove_gpio:
	ad7879_gpio_remove(ts);
err_remove_attr:
	sysfs_remove_group(&dev->kobj, &ad7879_attr_group);
err_free_irq:
	free_irq(ts->irq, ts);
err_free_mem:
	input_free_device(input_dev);
	kfree(ts);
err_out:
	return ERR_PTR(err);
}
Esempio n. 23
0
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	cFYI(1, "Devname: %s flags: %d ", dev_name, flags);

	volume_info = cifs_get_volume_info((char *)data, dev_name);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_nls;
	}

	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_cifs_sb;
	}

	cifs_setup_cifs_sb(volume_info, cifs_sb);

	rc = cifs_mount(cifs_sb, volume_info);
	if (rc) {
		if (!(flags & MS_SILENT))
			cERROR(1, "cifs_mount failed w/return code = %d", rc);
		root = ERR_PTR(rc);
		goto out_mountdata;
	}

	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		goto out;
	}

	if (sb->s_root) {
		cFYI(1, "Use existing superblock");
		cifs_umount(cifs_sb);
	} else {
		sb->s_flags = flags;
		/* BB should we make this contingent on mount parm? */
		sb->s_flags |= MS_NODIRATIME | MS_NOATIME;

		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= MS_ACTIVE;
	}

	root = cifs_get_root(volume_info, sb);
	if (IS_ERR(root))
		goto out_super;

	cFYI(1, "dentry root is: %p", root);
	goto out;

out_super:
	deactivate_locked_super(sb);
out:
	cifs_cleanup_volume_info(volume_info);
	return root;

out_mountdata:
	kfree(cifs_sb->mountdata);
out_cifs_sb:
	kfree(cifs_sb);
out_nls:
	unload_nls(volume_info->local_nls);
	goto out;
}
Esempio n. 24
0
/*
 * Returns the number of lower positive dentries, otherwise an error.
 * Can be called at unlinking with @type set to zero.
 */
int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type,
		   struct nameidata *nd)
{
	int npositive, err;
	aufs_bindex_t bindex, btail, bdiropq;
	unsigned char isdir;
	struct qstr whname;
	struct au_do_lookup_args args = {
		.flags	= 0,
		.type	= type,
		.nd	= nd
	};
	const struct qstr *name = &dentry->d_name;
	struct dentry *parent;
	struct inode *inode;

	err = au_test_shwh(dentry->d_sb, name);
	if (unlikely(err))
		goto out;

	err = au_wh_name_alloc(&whname, name);
	if (unlikely(err))
		goto out;

	inode = dentry->d_inode;
	isdir = !!(inode && S_ISDIR(inode->i_mode));
	if (!type)
		au_fset_lkup(args.flags, ALLOW_NEG);

	npositive = 0;
	parent = dget_parent(dentry);
	btail = au_dbtaildir(parent);
	for (bindex = bstart; bindex <= btail; bindex++) {
		struct dentry *h_parent, *h_dentry;
		struct inode *h_inode, *h_dir;

		h_dentry = au_h_dptr(dentry, bindex);
		if (h_dentry) {
			if (h_dentry->d_inode)
				npositive++;
			if (type != S_IFDIR)
				break;
			continue;
		}
		h_parent = au_h_dptr(parent, bindex);
		if (!h_parent)
			continue;
		h_dir = h_parent->d_inode;
		if (!h_dir || !S_ISDIR(h_dir->i_mode))
			continue;

		mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
		h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname,
					&args);
		mutex_unlock(&h_dir->i_mutex);
		err = PTR_ERR(h_dentry);
		if (IS_ERR(h_dentry))
			goto out_parent;
		au_fclr_lkup(args.flags, ALLOW_NEG);

		if (au_dbwh(dentry) >= 0)
			break;
		if (!h_dentry)
			continue;
		h_inode = h_dentry->d_inode;
		if (!h_inode)
			continue;
		npositive++;
		if (!args.type)
			args.type = h_inode->i_mode & S_IFMT;
		if (args.type != S_IFDIR)
			break;
		else if (isdir) {
			/* the type of lower may be different */
			bdiropq = au_dbdiropq(dentry);
			if (bdiropq >= 0 && bdiropq <= bindex)
				break;
		}
	}

	if (npositive) {
		AuLabel(positive);
		au_update_dbstart(dentry);
	}
	err = npositive;
	if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
		     && au_dbstart(dentry) < 0))
		/* both of real entry and whiteout found */
		err = -EIO;

 out_parent:
	dput(parent);
	kfree(whname.name);
 out:
	return err;
}

struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
			       struct au_branch *br)
{
	struct dentry *dentry;
	int wkq_err;

	if (!au_test_h_perm_sio(parent->d_inode, MAY_EXEC))
		dentry = au_lkup_one(name, parent, br, /*nd*/NULL);
	else {
		struct au_lkup_one_args args = {
			.errp		= &dentry,
			.name		= name,
			.h_parent	= parent,
			.br		= br,
			.nd		= NULL
		};

		wkq_err = au_wkq_wait(au_call_lkup_one, &args);
		if (unlikely(wkq_err))
			dentry = ERR_PTR(wkq_err);
	}

	return dentry;
}

/*
 * lookup @dentry on @bindex which should be negative.
 */
int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex)
{
	int err;
	struct dentry *parent, *h_parent, *h_dentry;

	parent = dget_parent(dentry);
	h_parent = au_h_dptr(parent, bindex);
	h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent,
				   au_sbr(dentry->d_sb, bindex));
	err = PTR_ERR(h_dentry);
	if (IS_ERR(h_dentry))
		goto out;
	if (unlikely(h_dentry->d_inode)) {
		err = -EIO;
		AuIOErr("b%d %.*s should be negative.\n",
			bindex, AuDLNPair(h_dentry));
		dput(h_dentry);
		goto out;
	}

	err = 0;
	if (bindex < au_dbstart(dentry))
		au_set_dbstart(dentry, bindex);
	if (au_dbend(dentry) < bindex)
		au_set_dbend(dentry, bindex);
	au_set_h_dptr(dentry, bindex, h_dentry);

 out:
	dput(parent);
	return err;
}
Esempio n. 25
0
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give that
 * cpu's node in @node to get NUMA affinity for the kthread stack, or
 * else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
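
A hedged usage sketch for the API documented above: a worker loops until kthread_should_stop() becomes true, and the newly created thread is deliberately left stopped so it can still be bound to a CPU (see the next sketch). Every identifier prefixed with example_ is hypothetical, not part of the kernel API.

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_task;

static int example_worker(void *data)
{
	/* run until kthread_stop() makes kthread_should_stop() return true */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;			/* handed back to kthread_stop() */
}

static int example_create_worker(void)
{
	example_task = kthread_create_on_node(example_worker, NULL, -1,
					      "example_worker");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	/* the thread exists but is stopped; it is started in the next sketch */
	return 0;
}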

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
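
A companion sketch for kthread_bind(): the hypothetical example_task created in the previous sketch is bound while still stopped and only then started with wake_up_process(); the CPU number 0 is an arbitrary assumption.

static void example_bind_and_start(void)
{
	/* binding must happen before the first wake_up_process() */
	kthread_bind(example_task, 0);
	wake_up_process(example_task);
}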

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
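
Finally, a hedged teardown sketch matching the kthread_stop() contract above: the returned value is example_worker()'s exit code, or -EINTR if the thread was never woken.

static int example_stop_worker(void)
{
	int ret = 0;

	if (!IS_ERR_OR_NULL(example_task)) {
		/* example_worker()'s return value, or -EINTR if never woken */
		ret = kthread_stop(example_task);
		example_task = NULL;
	}
	return ret;
}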

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
Esempio n. 26
0
/*
 * returns positive/negative dentry, NULL or an error.
 * NULL means whiteout-ed or not-found.
 */
static struct dentry*
au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
	     aufs_bindex_t bindex, struct qstr *wh_name,
	     struct au_do_lookup_args *args)
{
	struct dentry *h_dentry;
	struct inode *h_inode, *inode;
	struct au_branch *br;
	int wh_found, opq;
	unsigned char wh_able;
	const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);

	wh_found = 0;
	br = au_sbr(dentry->d_sb, bindex);
	wh_able = !!au_br_whable(br->br_perm);
	if (wh_able)
		wh_found = au_wh_test(h_parent, wh_name, br, /*try_sio*/0);
	h_dentry = ERR_PTR(wh_found);
	if (!wh_found)
		goto real_lookup;
	if (unlikely(wh_found < 0))
		goto out;

	/* We found a whiteout */
	/* au_set_dbend(dentry, bindex); */
	au_set_dbwh(dentry, bindex);
	if (!allow_neg)
		return NULL; /* success */

 real_lookup:
	h_dentry = au_lkup_one(&dentry->d_name, h_parent, br, args->nd);
	if (IS_ERR(h_dentry))
		goto out;

	h_inode = h_dentry->d_inode;
	if (!h_inode) {
		if (!allow_neg)
			goto out_neg;
	} else if (wh_found
		   || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
		goto out_neg;

	if (au_dbend(dentry) <= bindex)
		au_set_dbend(dentry, bindex);
	if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
		au_set_dbstart(dentry, bindex);
	au_set_h_dptr(dentry, bindex, h_dentry);

	inode = dentry->d_inode;
	if (!h_inode || !S_ISDIR(h_inode->i_mode) || !wh_able
	    || (inode && !S_ISDIR(inode->i_mode)))
		goto out; /* success */

	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
	opq = au_diropq_test(h_dentry, br);
	mutex_unlock(&h_inode->i_mutex);
	if (opq > 0)
		au_set_dbdiropq(dentry, bindex);
	else if (unlikely(opq < 0)) {
		au_set_h_dptr(dentry, bindex, NULL);
		h_dentry = ERR_PTR(opq);
	}
	goto out;

 out_neg:
	dput(h_dentry);
	h_dentry = NULL;
 out:
	return h_dentry;
}
Esempio n. 27
0
/*
 * inode retrieval
 */
struct inode *afs_iget(struct super_block *sb, struct key *key,
		       struct afs_fid *fid, struct afs_file_status *status,
		       struct afs_callback *cb)
{
	struct afs_iget_data data = { .fid = *fid };
	struct afs_super_info *as;
	struct afs_vnode *vnode;
	struct inode *inode;
	int ret;

	_enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique);

	as = sb->s_fs_info;
	data.volume = as->volume;

	inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set,
			     &data);
	if (!inode) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	_debug("GOT INODE %p { vl=%x vn=%x, u=%x }",
	       inode, fid->vid, fid->vnode, fid->unique);

	vnode = AFS_FS_I(inode);

	/* deal with an existing inode */
	if (!(inode->i_state & I_NEW)) {
		_leave(" = %p", inode);
		return inode;
	}

	if (!status) {
		/* it's a remotely extant inode */
		set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto bad_inode;
	} else {
		/* it's an inode we just created */
		memcpy(&vnode->status, status, sizeof(vnode->status));

		if (!cb) {
			/* it's a symlink we just created (the fileserver
			 * didn't give us a callback) */
			vnode->cb_version = 0;
			vnode->cb_expiry = 0;
			vnode->cb_type = 0;
			vnode->cb_expires = get_seconds();
		} else {
			vnode->cb_version = cb->version;
			vnode->cb_expiry = cb->expiry;
			vnode->cb_type = cb->type;
			vnode->cb_expires = vnode->cb_expiry + get_seconds();
		}
	}

	/* set up caching before mapping the status, as map-status reads the
	 * first page of symlinks to see if they're really mountpoints */
	inode->i_size = vnode->status.size;
#ifdef CONFIG_AFS_FSCACHE
	vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
					      &afs_vnode_cache_index_def,
					      vnode);
#endif

	ret = afs_inode_map_status(vnode, key);
	if (ret < 0)
		goto bad_inode;

	/* success */
	clear_bit(AFS_VNODE_UNSET, &vnode->flags);
	inode->i_flags |= S_NOATIME;
	unlock_new_inode(inode);
	_leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type);
	return inode;

	/* failure */
bad_inode:
#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(vnode->cache, 0);
	vnode->cache = NULL;
#endif
	iget_failed(inode);
	_leave(" = %d [bad]", ret);
	return ERR_PTR(ret);
}
Esempio n. 28
0
static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
{
	int err, fd;
	aufs_bindex_t wbi, bindex, bend;
	struct file *h_file;
	struct super_block *sb;
	struct dentry *root;
	struct au_branch *br;
	struct aufs_wbr_fd wbrfd = {
		.oflags	= au_dir_roflags,
		.brid	= -1
	};
	const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
		| O_NOATIME | O_CLOEXEC;

	AuDebugOn(wbrfd.oflags & ~valid);

	if (arg) {
		err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
		if (unlikely(err)) {
			err = -EFAULT;
			goto out;
		}

		err = -EINVAL;
		AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
		wbrfd.oflags |= au_dir_roflags;
		AuDbg("0%o\n", wbrfd.oflags);
		if (unlikely(wbrfd.oflags & ~valid))
			goto out;
	}

	fd = get_unused_fd();
	err = fd;
	if (unlikely(fd < 0))
		goto out;

	h_file = ERR_PTR(-EINVAL);
	wbi = 0;
	br = NULL;
	sb = path->dentry->d_sb;
	root = sb->s_root;
	aufs_read_lock(root, AuLock_IR);
	bend = au_sbend(sb);
	if (wbrfd.brid >= 0) {
		wbi = au_br_index(sb, wbrfd.brid);
		if (unlikely(wbi < 0 || wbi > bend))
			goto out_unlock;
	}

	h_file = ERR_PTR(-ENOENT);
	br = au_sbr(sb, wbi);
	if (!au_br_writable(br->br_perm)) {
		if (arg)
			goto out_unlock;

		bindex = wbi + 1;
		wbi = -1;
		for (; bindex <= bend; bindex++) {
			br = au_sbr(sb, bindex);
			if (au_br_writable(br->br_perm)) {
				wbi = bindex;
				br = au_sbr(sb, wbi);
				break;
			}
		}
	}
	AuDbg("wbi %d\n", wbi);
	if (wbi >= 0)
		h_file = au_h_open(root, wbi, wbrfd.oflags, NULL,
				   /*force_wr*/0);

out_unlock:
	aufs_read_unlock(root, AuLock_IR);
	err = PTR_ERR(h_file);
	if (IS_ERR(h_file))
		goto out_fd;

	atomic_dec(&br->br_count); /* cf. au_h_open() */
	fd_install(fd, h_file);
	err = fd;
	goto out; /* success */

out_fd:
	put_unused_fd(fd);
out:
	AuTraceErr(err);
	return err;
}

/* ---------------------------------------------------------------------- */

long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
{
	long err;
	struct dentry *dentry;

	switch (cmd) {
	case AUFS_CTL_RDU:
	case AUFS_CTL_RDU_INO:
		err = au_rdu_ioctl(file, cmd, arg);
		break;

	case AUFS_CTL_WBR_FD:
		err = au_wbr_fd(&file->f_path, (void __user *)arg);
		break;

	case AUFS_CTL_IBUSY:
		err = au_ibusy_ioctl(file, arg);
		break;

	case AUFS_CTL_BRINFO:
		err = au_brinfo_ioctl(file, arg);
		break;

	case AUFS_CTL_FHSM_FD:
		dentry = file->f_dentry;
		if (IS_ROOT(dentry))
			err = au_fhsm_fd(dentry->d_sb, arg);
		else
			err = -ENOTTY;
		break;

	default:
		/* do not call the lower */
		AuDbg("0x%x\n", cmd);
		err = -ENOTTY;
	}

	AuTraceErr(err);
	return err;
}
Esempio n. 29
0
/*
 * We have a vnode/inode, now get the dentry.  If there isn't
 * one already pointing to the inode, we must fabricate a
 * disconnected entry for NFS to connect up to the known
 * dcache tree (needed in the paranoia/export directory checking
 * cases; performance will be better if NFSEXP_NOSUBTREECHECK is
 * present in the export options, but will this break our use of
 * dentry->d_parent->d_inode in dentry_to_fh()? XXX).
 * Returns a valid dentry pointer or a negative error code.
 * If the returned dentry is a new one, the inode's refcount is
 * incremented.
 */
STATIC struct dentry *
vnlayer_find_dentry(VNODE_T *vp)
{
    static const struct qstr this_is_anon  = { .name = ""};
    struct dentry *dp;
    struct dentry *dnew;
    INODE_T *ip;

    ip = VTOI(vp);
    /*
     * We create an anonymous dentry for NFS, but it will be used
     * only if we don't find a suitable one for this inode.
     * d_alloc is here because before 2.6.38 it acquires dcache_lock.
     */
    dnew = d_alloc(NULL, &this_is_anon);
    if (dnew == NULL) {
        dp = ERR_PTR(-ENOMEM);
    } else {
        dnew->d_parent = dnew;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
        spin_lock(&dcache_lock);
#else
        spin_lock(&ip->i_lock);
#endif
        /*
         * equivalent of d_splice_alias,
         * we only want view-extended dentries
         */
        dp = vnlayer_inode2dentry_internal_no_lock(ip,
                                                   NULL,
                                                   NULL,
                                                   &vnode_dentry_ops);
        if (dp == NULL) {
            /* new dentry, increase the refcount for this v/inode */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
            VN_HOLD(vp);
#else
            /* can't igrab because the spinlock is acquired */
            ihold(ip);
#endif
            /* found no suitable dentry, add a new one */
            MDKI_SET_DOPS(dnew, &vnode_dentry_ops);
            dnew->d_sb = ip->i_sb;
            dnew->d_inode = ip;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
            dnew->d_flags |= NFSD_DCACHE_DISCON;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
            dnew->d_flags &= ~DCACHE_UNHASHED;
# endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */
            /* it is not clear if it needs DCACHE_RCUACCESS */
            dnew->d_flags |= NFSD_DCACHE_DISCON;
#endif /* else LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */
            list_add(&dnew->d_alias, &ip->i_dentry);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
            hlist_add_head(&dnew->d_hash, &ip->i_sb->s_anon);
# endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */
            hlist_bl_lock(&ip->i_sb->s_anon);
            hlist_bl_add_head_rcu(&dnew->d_hash, &ip->i_sb->s_anon);
            hlist_bl_unlock(&ip->i_sb->s_anon);
#endif /* else LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */
            dp = dnew;
            /* skip the dput call for dnew, we will need it */
            dnew = NULL;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
        spin_unlock(&dcache_lock);
#else
        spin_unlock(&ip->i_lock);
#endif
    }
    /* dput dnew if we don't use it */
    if (dnew != NULL)
        dput(dnew);
    return dp;
}

static const char vnode_verid_mvfs_linux_sops_c[] = "$Id:  2d7ea047.afe111e1.8fa4.00:01:84:c3:8a:52 $";
Esempio n. 30
0
static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size)
{
	void *end = value + size;
	struct jffs2_acl_header *header = value;
	struct jffs2_acl_entry *entry;
	struct posix_acl *acl;
	uint32_t ver;
	int i, count;

	if (!value)
		return NULL;
	if (size < sizeof(struct jffs2_acl_header))
		return ERR_PTR(-EINVAL);
	ver = je32_to_cpu(header->a_version);
	if (ver != JFFS2_ACL_VERSION) {
		JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver);
		return ERR_PTR(-EINVAL);
	}

	value += sizeof(struct jffs2_acl_header);
	count = jffs2_acl_count(size);
	if (count < 0)
		return ERR_PTR(-EINVAL);
	if (count == 0)
		return NULL;

	acl = posix_acl_alloc(count, GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	for (i=0; i < count; i++) {
		entry = value;
		if (value + sizeof(struct jffs2_acl_entry_short) > end)
			goto fail;
		acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag);
		acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm);
		switch (acl->a_entries[i].e_tag) {
			case ACL_USER_OBJ:
			case ACL_GROUP_OBJ:
			case ACL_MASK:
			case ACL_OTHER:
				value += sizeof(struct jffs2_acl_entry_short);
				acl->a_entries[i].e_id = ACL_UNDEFINED_ID;
				break;

			case ACL_USER:
			case ACL_GROUP:
				value += sizeof(struct jffs2_acl_entry);
				if (value > end)
					goto fail;
				acl->a_entries[i].e_id = je32_to_cpu(entry->e_id);
				break;

			default:
				goto fail;
		}
	}
	if (value != end)
		goto fail;
	return acl;
 fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}