Code Example #1
static int nfsio_alloc_sync(struct ploop_io * io, loff_t pos, loff_t len)
{
	int head_len = len & (PAGE_SIZE - 1);
	int nr_total = len >> PAGE_SHIFT;
	int nr = 1 << (io->plo->cluster_log + 9 - PAGE_SHIFT);
	struct page * pvec[nr];
	int i;
	int err = 0;

	for (i = 0; i < nr; i++)
		pvec[i] = ZERO_PAGE(0);

	if (head_len) {
		err = nfsio_sync_write(io, pvec[0], head_len, 0, pos >> 9);
		if (err)
			return err;

		pos += head_len;
	}

	while (nr_total > 0) {
		int n = (nr_total < nr) ? nr_total : nr;

		err = nfsio_sync_writevec(io, pvec, n, pos >> 9);
		if (err)
			return err;

		pos += n << PAGE_SHIFT;
		nr_total -= n;
	}

	io->alloc_head = pos >> (io->plo->cluster_log + 9);
	return 0;
}
Code Example #2
File: bio.c Project: AlexShiLucky/linux
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
Code Example #3
File: memory.c Project: davidbau/davej
/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
{
	if (from == ZERO_PAGE(address)) {
		clear_user_highpage(to, address);
		return;
	}
	copy_user_highpage(to, from, address);
}
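
For context, a minimal sketch (not the kernel's actual do_wp_page()) of how a copy-on-write write fault might use copy_cow_page(): a private page is allocated and filled from the faulting page, and a ZERO_PAGE source degenerates into a simple clear. The function name and error handling are simplified assumptions.

static struct page *example_cow_break(struct page *old_page, unsigned long address)
{
	struct page *new_page = alloc_page(GFP_HIGHUSER);

	if (!new_page)
		return NULL;	/* caller treats this as out of memory */
	copy_cow_page(old_page, new_page, address);
	return new_page;
}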
Code Example #4
File: crypto.c Project: Menpiko/SnaPKernel-N6P
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = ex->ee_block;
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblk;
		err = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (err) {
			bio_put(bio);
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
Code Example #5
File: blk-lib.c Project: DenisLug/mptcp
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
Code Example #6
File: blk-lib.c Project: DenisLug/mptcp
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
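
For reference, a hypothetical caller sketch for the API above: a single call zeroes a sector range, letting blkdev_issue_zeroout() try discard (or WRITE SAME) first and fall back to ZERO_PAGE-backed bio writes. The wrapper name and GFP flag choice are assumptions, not part of the original source.

static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	/* discard=true: prefer discard when the queue guarantees zeroed data */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, true);
}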
Code Example #7
File: memory.c Project: davidbau/davej
/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	struct page *page = NULL;
	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
	if (write_access) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			return -1;
		clear_user_highpage(page, addr);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		mm->rss++;
		flush_page_to_ram(page);
	}
	set_pte(page_table, entry);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	return 1;	/* Minor fault */
}
Code Example #8
File: memory.c Project: davidbau/davej
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = ptep_get_and_clear(pte);
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Code Example #9
File: memory.c Project: iwangv/edimax-br-6528n
/*
 * We are called with the MM semaphore and page_table_lock
 * spinlock held to protect against concurrent faults in
 * multithreaded programs. 
 */
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	pte_t entry;

	/* Read-only mapping of ZERO_PAGE. */
	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

	/* ..except if it's a write access */
	if (write_access) {
		struct page *page;

		/* Allocate our own private page. */
		spin_unlock(&mm->page_table_lock);

		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto no_mem;
		clear_user_highpage(page, addr);

		spin_lock(&mm->page_table_lock);
		if (!pte_none(*page_table)) {
			page_cache_release(page);
			spin_unlock(&mm->page_table_lock);
			return 1;
		}
		mm->rss++;
		flush_page_to_ram(page);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		lru_cache_add(page);
		mark_page_accessed(page);
	}

	set_pte(page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	spin_unlock(&mm->page_table_lock);
	return 1;	/* Minor fault */

no_mem:
	return -1;
}
Code Example #10
static inline void zeromap_pte_range(struct mm_struct *mm, pte_t * pte,
				     unsigned long address, unsigned long size,
				     pgprot_t prot)
{
	unsigned long end;

	debug_lock_break(1);
	break_spin_lock(&mm->page_table_lock);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = ptep_get_and_clear(pte);
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Code Example #11
File: flush.c Project: 0-T-0/ps4-linux
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
Code Example #12
File: tlb.c Project: 01org/KVMGT-kernel
/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *  	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}
Code Example #13
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr elf;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	int numnote = 4;
	struct memelfnote notes[4];
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct elf_prpsinfo psinfo;	/* NT_PRPSINFO */

	/* first copy the parameters from user space */
	memset(&psinfo, 0, sizeof(psinfo));
	{
		unsigned int i, len;

		len = current->mm->arg_end - current->mm->arg_start;
		if (len >= ELF_PRARGSZ)
			len = ELF_PRARGSZ-1;
		copy_from_user(&psinfo.pr_psargs,
			      (const char *)current->mm->arg_start, len);
		for(i = 0; i < len; i++)
			if (psinfo.pr_psargs[i] == 0)
				psinfo.pr_psargs[i] = ' ';
		psinfo.pr_psargs[len] = 0;

	}

	memset(&prstatus, 0, sizeof(prstatus));
	/*
	 * This transfers the registers from regs into the standard
	 * coredump arrangement, whatever that is.
	 */
#ifdef ELF_CORE_COPY_REGS
	ELF_CORE_COPY_REGS(prstatus.pr_reg, regs)
#else
	if (sizeof(elf_gregset_t) != sizeof(struct pt_regs))
	{
		printk("sizeof(elf_gregset_t) (%ld) != sizeof(struct pt_regs) (%ld)\n",
			(long)sizeof(elf_gregset_t), (long)sizeof(struct pt_regs));
	}
	else
		*(struct pt_regs *)&prstatus.pr_reg = *regs;
#endif

	/* now stop all vm operations */
	down_write(&current->mm->mmap_sem);
	segs = current->mm->map_count;

#ifdef DEBUG
	printk("elf_core_dump: %d segs %lu limit\n", segs, limit);
#endif

	/* Set up header */
	memcpy(elf.e_ident, ELFMAG, SELFMAG);
	elf.e_ident[EI_CLASS] = ELF_CLASS;
	elf.e_ident[EI_DATA] = ELF_DATA;
	elf.e_ident[EI_VERSION] = EV_CURRENT;
	memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf.e_type = ET_CORE;
	elf.e_machine = ELF_ARCH;
	elf.e_version = EV_CURRENT;
	elf.e_entry = 0;
	elf.e_phoff = sizeof(elf);
	elf.e_shoff = 0;
#ifdef ELF_CORE_EFLAGS
	elf.e_flags = ELF_CORE_EFLAGS;
#else
	elf.e_flags = 0;
#endif
	elf.e_ehsize = sizeof(elf);
	elf.e_phentsize = sizeof(struct elf_phdr);
	elf.e_phnum = segs+1;		/* Include notes */
	elf.e_shentsize = 0;
	elf.e_shnum = 0;
	elf.e_shstrndx = 0;

	fs = get_fs();
	set_fs(KERNEL_DS);

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	DUMP_WRITE(&elf, sizeof(elf));
	offset += sizeof(elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	notes[0].name = "CORE";
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(prstatus);
	notes[0].data = &prstatus;
	prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
	prstatus.pr_sigpend = current->pending.signal.sig[0];
	prstatus.pr_sighold = current->blocked.sig[0];
	psinfo.pr_pid = prstatus.pr_pid = current->pid;
	psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid;
	psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp;
	psinfo.pr_sid = prstatus.pr_sid = current->session;
	prstatus.pr_utime.tv_sec = CT_TO_SECS(current->times.tms_utime);
	prstatus.pr_utime.tv_usec = CT_TO_USECS(current->times.tms_utime);
	prstatus.pr_stime.tv_sec = CT_TO_SECS(current->times.tms_stime);
	prstatus.pr_stime.tv_usec = CT_TO_USECS(current->times.tms_stime);
	prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->times.tms_cutime);
	prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->times.tms_cutime);
	prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->times.tms_cstime);
	prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->times.tms_cstime);

#ifdef DEBUG
	dump_regs("Passed in regs", (elf_greg_t *)regs);
	dump_regs("prstatus regs", (elf_greg_t *)&prstatus.pr_reg);
#endif

	notes[1].name = "CORE";
	notes[1].type = NT_PRPSINFO;
	notes[1].datasz = sizeof(psinfo);
	notes[1].data = &psinfo;
	i = current->state ? ffz(~current->state) + 1 : 0;
	psinfo.pr_state = i;
	psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
	psinfo.pr_zomb = psinfo.pr_sname == 'Z';
	psinfo.pr_nice = task_nice(current);
	psinfo.pr_flag = current->flags;
	psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
	psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
	strncpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));

	notes[2].name = "CORE";
	notes[2].type = NT_TASKSTRUCT;
	notes[2].datasz = sizeof(*current);
	notes[2].data = current;

	/* Try to dump the FPU. */
	prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
	if (!prstatus.pr_fpvalid)
	{
		numnote--;
	}
	else
	{
		notes[3].name = "CORE";
		notes[3].type = NT_PRFPREG;
		notes[3].datasz = sizeof(fpu);
		notes[3].data = &fpu;
	}
	
	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for(i = 0; i < numnote; i++)
			sz += notesize(&notes[i]);

		phdr.p_type = PT_NOTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = 0;
		phdr.p_paddr = 0;
		phdr.p_filesz = sz;
		phdr.p_memsz = 0;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		offset += phdr.p_filesz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	for(i = 0; i < numnote; i++)
		if (!writenote(&notes[i], file))
			goto end_coredump;

	DUMP_SEEK(dataoff);

	for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

#ifdef DEBUG
		printk("elf_core_dump: writing %08lx-%08lx\n", vma->vm_start, vma->vm_end);
#endif

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr);
					kaddr = kmap(page);
					DUMP_WRITE(kaddr, PAGE_SIZE);
					flush_page_to_ram(page);
					kunmap(page);
				}
				put_page(page);
			}
		}
	}

	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

 end_coredump:
	set_fs(fs);
	up_write(&current->mm->mmap_sem);
	return has_dumped;
}
Code Example #14
File: kexec_file.c Project: acton393/linux
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions)
		goto out_free_desc;

	desc->tfm   = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
						sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
						digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	kfree(tfm);
out:
	return ret;
}
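
The zero-padding idea above can be isolated into a minimal sketch: the bytes between bufsz and memsz are hashed straight from the shared zero page, so no zeroed buffer has to be allocated. The helper name is hypothetical.

static int example_hash_zero_pad(struct shash_desc *desc, size_t nullsz)
{
	void *zero_buf = page_address(ZERO_PAGE(0));	/* a page full of zeroes */
	int ret = 0;

	while (nullsz && !ret) {
		size_t bytes = min_t(size_t, nullsz, PAGE_SIZE);

		ret = crypto_shash_update(desc, zero_buf, bytes);
		nullsz -= bytes;
	}
	return ret;
}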
Code Example #15
File: ddump_mt.c Project: jamescth/notes
static void
walk_through_vma(struct task_struct *tp, ddump_comp_task_t *compt)
{
	struct vm_area_struct 	*vma = NULL;
	struct vm_area_struct 	*gate_vma= NULL;
	//int                     blk_in_chunk = 0;
	int			ret;
	int			i = 0;

	init_completion(&compt->cp_buff_io_read);

	for (vma = first_vma(tp, gate_vma) ; vma != NULL ;
			vma = next_vma(vma, gate_vma)) {

		unsigned long addr;
		//unsigned long pages_in_vma = 0;

#if 0
		if (!maydump_elf_vma(vma, mm_flags)) {
			// Dbg("VMA <%lx> startva <%lx> skipped!", 
			// (unsigned long)vma, vma->vm_start);
			continue;
		}
#endif
		for (addr = vma->vm_start; addr < vma->vm_end;
			 addr += PAGE_SIZE) {

			struct vm_area_struct *vma_temp;
			struct page           *page;

			ret = get_user_pages(tp, tp->mm,
				addr, 1, 0, 1, &page, &vma_temp);

			if (ret > 0) { 
				if (page == ZERO_PAGE(page)) {
					compt->cp_app_zero_pages++;
					atomic64_inc(&app_zero_pages);
					//memset(primary->scratch + blk_in_chunk * PAGE_SIZE, 
					//	0, PAGE_SIZE);
				} else {
					compt->cp_app_non_zero_pages++;
					atomic64_inc(&app_non_zero_pages);
					//kaddr = kmap_atomic(page, 0);
					//memcpy(primary->scratch + (blk_in_chunk * PAGE_SIZE), 
					//	kaddr, PAGE_SIZE);
					//kunmap_atomic(kaddr, 0);
				}
				page_cache_release(page);

			}  else {
				compt->cp_app_other_pages++;
				atomic64_inc(&app_other_pages);
				//memset(primary->scratch + blk_in_chunk * PAGE_SIZE, 
				//	0, PAGE_SIZE);
			}

			i++;
			//printk("%d i %d\n", compt->cp_order, i);

			// take a break here.
			if ((i%8) == 0) {
				//read_waiter(compt);
				//init_completion(&compt->cp_buff_io_read);
				//printk("init r %d i %d\n",compt->cp_order, i);
				//for ( i = 0 ; i< 1000; i++);
				complete(&compt->cp_buff_io_read);

				printk("wait w %d i %d\n",compt->cp_order, i);
				wait_for_completion(&compt->cp_buff_io_write);
			}
		} // addr
	} // first_vma

	if ((i%8) != 0) {
		complete(&compt->cp_buff_io_read);
	}
}
Code Example #16
static int
rbio_submit(struct ploop_io * io, struct nfs_read_data * nreq,
	    const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;
	struct rpc_task *task;

	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};

	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = cb,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
		.workqueue = nfsio_workqueue,
#endif
		.flags = RPC_TASK_ASYNC,
	};

	nreq->res.count = nreq->args.count;
	nreq->header->cred = msg.rpc_cred;
	nreq->args.context = ctx;

	task_setup_data.task = &nreq->task;
	task_setup_data.callback_data = nreq;
	msg.rpc_argp = &nreq->args;
	msg.rpc_resp = &nreq->res;
	NFS_PROTO(inode)->read_setup(nreq, &msg);

	task = rpc_run_task(&task_setup_data);
	if (unlikely(IS_ERR(task)))
		return PTR_ERR(task);

	rpc_put_task(task);
	return 0;
}

#else

static int
rbio_submit(struct ploop_io * io, struct nfs_read_data * nreq,
	    const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	nreq->res.count = nreq->args.count;
	nreq->cred = ctx->cred;
	nreq->args.context = ctx;

	rpc_init_task(&nreq->task, NFS_CLIENT(inode), RPC_TASK_ASYNC, cb, nreq);
	NFS_PROTO(inode)->read_setup(nreq);

	nreq->task.tk_cookie = (unsigned long) inode;

	lock_kernel();
	rpc_execute(&nreq->task);
	unlock_kernel();
	return 0;
}
#endif

static void
nfsio_submit_read(struct ploop_io *io, struct ploop_request * preq,
		  struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t rsize = NFS_SERVER(inode)->rsize;
	struct nfs_read_data *nreq = NULL;
	loff_t pos;
	unsigned int prev_end;
	struct bio * b;

	ploop_prepare_io_request(preq);

	pos = sbl->head->bi_sector;
	pos = ((loff_t)iblk << preq->plo->cluster_log) | (pos & ((1<<preq->plo->cluster_log) - 1));
	pos <<= 9;

	prev_end = PAGE_SIZE;

	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int bv_idx;

		for (bv_idx = 0; bv_idx < b->bi_vcnt; bv_idx++) {
			struct bio_vec * bv = &b->bi_io_vec[bv_idx];

			if (nreq && nreq->args.count + bv->bv_len <= rsize) {
				if (nreq->pages.pagevec[nreq->pages.npages-1] == bv->bv_page &&
				    prev_end == bv->bv_offset) {
					nreq->args.count += bv->bv_len;
					pos += bv->bv_len;
					prev_end += bv->bv_len;
					continue;
				}
				if (nreq->pages.npages < MAX_NBIO_PAGES &&
				    bv->bv_offset == 0 && prev_end == PAGE_SIZE) {
					nreq->args.count += bv->bv_len;
					nreq->pages.pagevec[nreq->pages.npages] = bv->bv_page;
					nreq->pages.npages++;
					pos += bv->bv_len;
					prev_end = bv->bv_offset + bv->bv_len;
					continue;
				}
			}

			if (nreq) {
				int err;

				atomic_inc(&preq->io_count);

				err = rbio_submit(io, nreq, &nfsio_read_ops);
				if (err) {
					PLOOP_REQ_SET_ERROR(preq, err);
					ploop_complete_io_request(preq);
					goto out;
				}
			}

			nreq = rbio_init(pos, bv->bv_page, bv->bv_offset,
					 bv->bv_len, preq, inode);

			if (nreq == NULL) {
				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
				goto out;
			}

			pos += bv->bv_len;
			prev_end = bv->bv_offset + bv->bv_len;
		}
	}

	if (nreq) {
		int err;

		atomic_inc(&preq->io_count);

		err = rbio_submit(io, nreq, &nfsio_read_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			ploop_complete_io_request(preq);
			goto out;
		}
	}

out:
	ploop_complete_io_request(preq);
}

static void nfsio_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	status = NFS_PROTO(data->header->inode)->write_done(task, data);
	if (status != 0)
		return;

	if (task->tk_status >= 0 && resp->count < argp->count)
		task->tk_status = -EIO;
}

static void nfsio_write_release(void *calldata)
{
	struct nfs_write_data *nreq = calldata;
	struct ploop_request *preq = (struct ploop_request *) nreq->header->req;
	int status = nreq->task.tk_status;

	if (unlikely(status < 0))
		PLOOP_REQ_SET_ERROR(preq, status);

	if (!preq->error &&
	    nreq->res.verf->committed != NFS_FILE_SYNC) {
		if (!test_and_set_bit(PLOOP_REQ_UNSTABLE, &preq->state))
			memcpy(&preq->verf, &nreq->res.verf->verifier, 8);
	}
	nfsio_complete_io_request(preq);

	nfsio_wbio_release(calldata);
}

static const struct rpc_call_ops nfsio_write_ops = {
	.rpc_call_done = nfsio_write_result,
	.rpc_release = nfsio_write_release,
};

static struct nfs_write_data *
wbio_init(loff_t pos, struct page * page, unsigned int off, unsigned int len,
	  void * priv, struct inode * inode)
{
	struct nfs_write_data * nreq;

	nreq = nfsio_wbio_alloc(MAX_NBIO_PAGES);
	if (unlikely(nreq == NULL))
		return NULL;

	nreq->args.offset = pos;
	nreq->args.pgbase = off;
	nreq->args.count = len;
	nreq->pages.pagevec[0] = page;
	nreq->pages.npages = 1;
	nreq->header->req = priv;
	nreq->header->inode = inode;
	nreq->args.fh = NFS_FH(inode);
	nreq->args.pages = nreq->pages.pagevec;
	nreq->args.stable = NFS_UNSTABLE;
	nreq->res.fattr = &nreq->fattr;
	nreq->res.verf = &nreq->verf;
	return nreq;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)

static int wbio_submit(struct ploop_io * io, struct nfs_write_data *nreq,
		       const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};

	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = cb,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
		.workqueue = nfsio_workqueue,
#endif
		.flags = RPC_TASK_ASYNC,
	};

	if (verify_bounce(nreq))
		return -ENOMEM;

	nreq->res.count = nreq->args.count;
	nreq->args.context = ctx;
	nreq->header->cred = msg.rpc_cred;

	task_setup_data.task = &nreq->task;
	task_setup_data.callback_data = nreq;
	msg.rpc_argp = &nreq->args;
	msg.rpc_resp = &nreq->res;
	NFS_PROTO(inode)->write_setup(nreq, &msg);

	task = rpc_run_task(&task_setup_data);
	if (unlikely(IS_ERR(task)))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

#else

static int wbio_submit(struct ploop_io * io, struct nfs_write_data *nreq,
		       const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	if (verify_bounce(nreq))
		return -ENOMEM;

	nreq->res.count = nreq->args.count;
	nreq->args.context = ctx;
	nreq->cred = ctx->cred;

	rpc_init_task(&nreq->task, NFS_CLIENT(inode), RPC_TASK_ASYNC, cb, nreq);
	NFS_PROTO(inode)->write_setup(nreq, NFS_UNSTABLE);

	nreq->task.tk_priority = RPC_PRIORITY_NORMAL;
	nreq->task.tk_cookie = (unsigned long) inode;

	lock_kernel();
	rpc_execute(&nreq->task);
	unlock_kernel();
	return 0;
}

#endif


static void
nfsio_submit_write(struct ploop_io *io, struct ploop_request * preq,
		   struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	struct nfs_write_data *nreq = NULL;
	loff_t pos;
	struct bio * b;
	unsigned int prev_end;

	nfsio_prepare_io_request(preq);

	pos = sbl->head->bi_sector;
	pos = ((loff_t)iblk << preq->plo->cluster_log) | (pos & ((1<<preq->plo->cluster_log) - 1));
	ploop_prepare_tracker(preq, pos);
	pos <<= 9;

	prev_end = PAGE_SIZE;

	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int bv_idx;

		for (bv_idx = 0; bv_idx < b->bi_vcnt; bv_idx++) {
			struct bio_vec * bv = &b->bi_io_vec[bv_idx];

			if (nreq && nreq->args.count + bv->bv_len <= wsize) {
				if (nreq->pages.pagevec[nreq->pages.npages-1] == bv->bv_page &&
				    prev_end == bv->bv_offset) {
					nreq->args.count += bv->bv_len;
					pos += bv->bv_len;
					prev_end += bv->bv_len;
					continue;
				}
				if (nreq->pages.npages < MAX_NBIO_PAGES &&
				    bv->bv_offset == 0 && prev_end == PAGE_SIZE) {
					nreq->args.count += bv->bv_len;
					nreq->pages.pagevec[nreq->pages.npages] = bv->bv_page;
					nreq->pages.npages++;
					pos += bv->bv_len;
					prev_end = bv->bv_offset + bv->bv_len;
					continue;
				}
			}

			if (nreq) {
				int err;
				atomic_inc(&preq->io_count);
				err = wbio_submit(io, nreq, &nfsio_write_ops);
				if (err) {
					PLOOP_REQ_SET_ERROR(preq, err);
					nfsio_complete_io_request(preq);
					goto out;
				}
			}

			nreq = wbio_init(pos, bv->bv_page, bv->bv_offset,
					 bv->bv_len, preq, inode);

			if (nreq == NULL) {
				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
				goto out;
			}

			prev_end = bv->bv_offset + bv->bv_len;
			pos += bv->bv_len;
		}
	}

	if (nreq) {
		int err;
		atomic_inc(&preq->io_count);
		err = wbio_submit(io, nreq, &nfsio_write_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			nfsio_complete_io_request(preq);
		}
	}

out:
	nfsio_complete_io_request(preq);
}

static void
nfsio_submit(struct ploop_io *io, struct ploop_request * preq,
	     unsigned long rw,
	     struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	if (iblk == PLOOP_ZERO_INDEX)
		iblk = 0;

	if (rw & (1<<BIO_RW))
		nfsio_submit_write(io, preq, sbl, iblk, size);
	else
		nfsio_submit_read(io, preq, sbl, iblk, size);
}

struct bio_list_walk
{
	struct bio * cur;
	int idx;
	int bv_off;
};

static void
nfsio_submit_write_pad(struct ploop_io *io, struct ploop_request * preq,
		       struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	struct nfs_write_data *nreq = NULL;
	struct bio_list_walk bw;
	unsigned prev_end;

	loff_t pos, end_pos, start, end;

	/* pos..end_pos is the range which we are going to write */
	pos = (loff_t)iblk << (preq->plo->cluster_log + 9);
	end_pos = pos + (1 << (preq->plo->cluster_log + 9));

	/* start..end is data that we have. The rest must be zero padded. */
	start = pos + ((sbl->head->bi_sector & ((1<<preq->plo->cluster_log) - 1)) << 9);
	end = start + (size << 9);

	nfsio_prepare_io_request(preq);
	ploop_prepare_tracker(preq, start >> 9);

	prev_end = PAGE_SIZE;

#if 1
	/* GCC, shut up! */
	bw.cur = sbl->head;
	bw.idx = 0;
	bw.bv_off = 0;
	BUG_ON(bw.cur->bi_io_vec[0].bv_len & 511);
#endif

	while (pos < end_pos) {
		struct page * page;
		unsigned int poff, plen;

		if (pos < start) {
			page = ZERO_PAGE(0);
			poff = 0;
			plen = start - pos;
			if (plen > PAGE_SIZE)
				plen = PAGE_SIZE;
		} else if (pos >= end) {
			page = ZERO_PAGE(0);
			poff = 0;
			plen = end_pos - pos;
			if (plen > PAGE_SIZE)
				plen = PAGE_SIZE;
		} else {
			/* pos >= start && pos < end */
			struct bio_vec * bv;

			if (pos == start) {
				bw.cur = sbl->head;
				bw.idx = 0;
				bw.bv_off = 0;
				BUG_ON(bw.cur->bi_io_vec[0].bv_len & 511);
			}
			bv = bw.cur->bi_io_vec + bw.idx;

			if (bw.bv_off >= bv->bv_len) {
				bw.idx++;
				bv++;
				bw.bv_off = 0;
				if (bw.idx >= bw.cur->bi_vcnt) {
					bw.cur = bw.cur->bi_next;
					bw.idx = 0;
					bw.bv_off = 0;
					bv = bw.cur->bi_io_vec;
				}
				BUG_ON(bv->bv_len & 511);
			}

			page = bv->bv_page;
			poff = bv->bv_offset + bw.bv_off;
			plen = bv->bv_len - bw.bv_off;
		}

		if (nreq && nreq->args.count + plen <= wsize) {
			if (nreq->pages.pagevec[nreq->pages.npages-1] == page &&
			    prev_end == poff) {
				nreq->args.count += plen;
				pos += plen;
				bw.bv_off += plen;
				prev_end += plen;
				continue;
			}
			if (nreq->pages.npages < MAX_NBIO_PAGES &&
			    poff == 0 && prev_end == PAGE_SIZE) {
				nreq->args.count += plen;
				nreq->pages.pagevec[nreq->pages.npages] = page;
				nreq->pages.npages++;
				pos += plen;
				bw.bv_off += plen;
				prev_end = poff + plen;
				continue;
			}
		}

		if (nreq) {
			int err;
			atomic_inc(&preq->io_count);
			err = wbio_submit(io, nreq, &nfsio_write_ops);
			if (err) {
				PLOOP_REQ_SET_ERROR(preq, err);
				nfsio_complete_io_request(preq);
				goto out;
			}
		}

		nreq = wbio_init(pos, page, poff, plen, preq, inode);

		if (nreq == NULL) {
			PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
			goto out;
		}

		prev_end = poff + plen;
		pos += plen;
		bw.bv_off += plen;
	}

	if (nreq) {
		int err;
		atomic_inc(&preq->io_count);
		err = wbio_submit(io, nreq, &nfsio_write_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			nfsio_complete_io_request(preq);
		}
	}

out:
	nfsio_complete_io_request(preq);
}


static void
nfsio_submit_alloc(struct ploop_io *io, struct ploop_request * preq,
		 struct bio_list * sbl, unsigned int size)
{
	iblock_t iblk = io->alloc_head++;

	if (!(io->files.file->f_mode & FMODE_WRITE)) {
		PLOOP_FAIL_REQUEST(preq, -EBADF);
		return;
	}
	preq->iblock = iblk;
	preq->eng_state = PLOOP_E_DATA_WBI;

	nfsio_submit_write_pad(io, preq, sbl, iblk, size);
}

static void nfsio_destroy(struct ploop_io * io)
{
	if (io->fsync_thread) {
		kthread_stop(io->fsync_thread);
		io->fsync_thread = NULL;
	}

	if (io->files.file) {
		struct file * file = io->files.file;
		mutex_lock(&io->plo->sysfs_mutex);
		io->files.file = NULL;
		if (io->files.mapping)
			(void)invalidate_inode_pages2(io->files.mapping);
		mutex_unlock(&io->plo->sysfs_mutex);
		fput(file);
	}
}

static int nfsio_sync(struct ploop_io * io)
{
	return 0;
}

static int nfsio_stop(struct ploop_io * io)
{
	return 0;
}


static int
nfsio_init(struct ploop_io * io)
{
	INIT_LIST_HEAD(&io->fsync_queue);
	init_waitqueue_head(&io->fsync_waitq);
	return 0;
}
Code Example #17
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
								size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int order;
	int ret;
	int count =0;
#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	size_t iova_size = 0;
#endif
	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	iova_size = ALIGN(size, SZ_64K);
	start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, iova_size,
									order);
#else
	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
#endif
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));
	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		pr_err("IOVMM: iovmm_map failed as mapped_size (%d) < size (%d)\n", mapped_size, size);
		goto err_map_map;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	if (iova_size != size) {
		addr = start + size;
		size = iova_size;

		for (; addr < start + size; addr += PAGE_SIZE) {
			ret = iommu_map(vmm->domain, addr,
				page_to_phys(ZERO_PAGE(0)), PAGE_SIZE, 0);
			if (ret)
				goto err_map_map;

			mapped_size += PAGE_SIZE;
		}
	}
#endif

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
					region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	dev_dbg(dev, "IOVMM: Failed to allocated VM region for %#x bytes.\n",
									size);
	return (dma_addr_t)ret;
}
Code Example #18
File: cpup.c Project: aywq2008/omniplay
static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
			   char *buf, unsigned long blksize)
{
	int err;
	size_t sz, rbytes, wbytes;
	unsigned char all_zero;
	char *p, *zp;
	struct mutex *h_mtx;
	/* reduce stack usage */
	struct iattr *ia;

	zp = page_address(ZERO_PAGE(0));
	if (unlikely(!zp))
		return -ENOMEM; /* possible? */

	err = 0;
	all_zero = 0;
	while (len) {
		AuDbg("len %lld\n", len);
		sz = blksize;
		if (len < blksize)
			sz = len;

		rbytes = 0;
		/* todo: signal_pending? */
		while (!rbytes || err == -EAGAIN || err == -EINTR) {
			rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
			err = rbytes;
		}
		if (unlikely(err < 0))
			break;

		all_zero = 0;
		if (len >= rbytes && rbytes == blksize)
			all_zero = !memcmp(buf, zp, rbytes);
		if (!all_zero) {
			wbytes = rbytes;
			p = buf;
			while (wbytes) {
				size_t b;

				b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
				err = b;
				/* todo: signal_pending? */
				if (unlikely(err == -EAGAIN || err == -EINTR))
					continue;
				if (unlikely(err < 0))
					break;
				wbytes -= b;
				p += b;
			}
		} else {
			loff_t res;

			AuLabel(hole);
			res = vfsub_llseek(dst, rbytes, SEEK_CUR);
			err = res;
			if (unlikely(res < 0))
				break;
		}
		len -= rbytes;
		err = 0;
	}

	/* the last block may be a hole */
	if (!err && all_zero) {
		AuLabel(last hole);

		err = 1;
		if (au_test_nfs(dst->f_dentry->d_sb)) {
			/* nfs requires this step to make last hole */
			/* is this only nfs? */
			do {
				/* todo: signal_pending? */
				err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
			} while (err == -EAGAIN || err == -EINTR);
			if (err == 1)
				dst->f_pos--;
		}

		if (err == 1) {
			ia = (void *)buf;
			ia->ia_size = dst->f_pos;
			ia->ia_valid = ATTR_SIZE | ATTR_FILE;
			ia->ia_file = dst;
			h_mtx = &dst->f_dentry->d_inode->i_mutex;
			mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
			err = vfsub_notify_change(&dst->f_path, ia);
			mutex_unlock(h_mtx);
		}
	}

	return err;
}
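
The hole-detection trick above (comparing a block of data against the shared zero page instead of a privately zeroed buffer) can be shown on its own; the helper name is hypothetical.

static int example_buf_is_zero(const char *buf, size_t len)
{
	const char *zp = page_address(ZERO_PAGE(0));

	/* ZERO_PAGE(0) is all zeroes, so memcmp() detects an all-zero block */
	return len <= PAGE_SIZE && !memcmp(buf, zp, len);
}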
Code Example #19
File: crypto.c Project: Chong-Li/cse522
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}