Example #1
static unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;


	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);
				if (vma == next)
					vma = prev;
				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	if (!move_page_tables(vma, new_addr, addr, old_len)) {
		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}
		/* The old VMA has been accounted for, don't double account */
		do_munmap(current->mm, addr, old_len, 0);
		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (new_vma->vm_flags & VM_LOCKED) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			make_pages_present(new_vma->vm_start,
					   new_vma->vm_end);
		}
		return new_addr;
	}
	if (allocated_vma)
		kmem_cache_free(vm_area_cachep, new_vma);
out:
	return -ENOMEM;
}
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	C(tstamp);
	C(dev);
	C(h);
	C(nh);
	C(mac);
	C(dst);
	dst_clone(skb->dst);
	C(sp);
#ifdef CONFIG_INET
	secpath_get(skb->sp);
#endif
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len);
	C(data_len);
	C(csum);
	C(local_df);
	n->cloned = 1;
	n->nohdr = 0;
	C(pkt_type);
	C(ip_summed);
	C(priority);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	C(ipvs_property);
#endif
	C(protocol);
	n->destructor = NULL;
#ifdef CONFIG_NETFILTER
	C(nfmark);
	C(nfct);
	nf_conntrack_get(skb->nfct);
	C(nfctinfo);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	C(nfct_reasm);
	nf_conntrack_get_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	C(nf_bridge);
	nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	C(input_dev);
#endif

#endif
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
}
Example #3
/**
 * ecryptfs_lookup
 * @dir: inode
 * @dentry: The dentry
 * @nd: nameidata, may be NULL
 *
 * Find a file on disk. If the file does not exist, then we'll add it to the
 * dentry cache and continue on to read it from the disk.
 */
static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
				      struct nameidata *nd)
{
	int rc = 0;
	struct dentry *lower_dir_dentry;
	struct dentry *lower_dentry;
	struct vfsmount *lower_mnt;
	char *encoded_name;
	int encoded_namelen;
	struct ecryptfs_crypt_stat *crypt_stat = NULL;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
	char *page_virt = NULL;
	struct inode *lower_inode;
	u64 file_size;

	lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
	dentry->d_op = &ecryptfs_dops;
	if ((dentry->d_name.len == 1 && !strcmp(dentry->d_name.name, "."))
	    || (dentry->d_name.len == 2
		&& !strcmp(dentry->d_name.name, ".."))) {
		d_drop(dentry);
		goto out;
	}
	encoded_namelen = ecryptfs_encode_filename(crypt_stat,
						   dentry->d_name.name,
						   dentry->d_name.len,
						   &encoded_name);
	if (encoded_namelen < 0) {
		rc = encoded_namelen;
		d_drop(dentry);
		goto out;
	}
	ecryptfs_printk(KERN_DEBUG, "encoded_name = [%s]; encoded_namelen "
			"= [%d]\n", encoded_name, encoded_namelen);
	lower_dentry = lookup_one_len(encoded_name, lower_dir_dentry,
				      encoded_namelen - 1);
	kfree(encoded_name);
	if (IS_ERR(lower_dentry)) {
		ecryptfs_printk(KERN_ERR, "ERR from lower_dentry\n");
		rc = PTR_ERR(lower_dentry);
		d_drop(dentry);
		goto out;
	}
	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
	ecryptfs_printk(KERN_DEBUG, "lower_dentry = [%p]; lower_dentry->"
       		"d_name.name = [%s]\n", lower_dentry,
		lower_dentry->d_name.name);
	lower_inode = lower_dentry->d_inode;
	fsstack_copy_attr_atime(dir, lower_dir_dentry->d_inode);
	BUG_ON(!atomic_read(&lower_dentry->d_count));
	ecryptfs_set_dentry_private(dentry,
				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
						     GFP_KERNEL));
	if (!ecryptfs_dentry_to_private(dentry)) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
				"to allocate ecryptfs_dentry_info struct\n");
		goto out_dput;
	}
	ecryptfs_set_dentry_lower(dentry, lower_dentry);
	ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
	if (!lower_dentry->d_inode) {
		/* We want to add because we couldn't find in lower */
		d_add(dentry, NULL);
		goto out;
	}
	rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb,
				ECRYPTFS_INTERPOSE_FLAG_D_ADD);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error interposing\n");
		goto out;
	}
	if (S_ISDIR(lower_inode->i_mode)) {
		ecryptfs_printk(KERN_DEBUG, "Is a directory; returning\n");
		goto out;
	}
	if (S_ISLNK(lower_inode->i_mode)) {
		ecryptfs_printk(KERN_DEBUG, "Is a symlink; returning\n");
		goto out;
	}
	if (special_file(lower_inode->i_mode)) {
		ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
		goto out;
	}
	if (!nd) {
		ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
				"as we *think* we are about to unlink\n");
		goto out;
	}
	/* Released in this function */
	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2,
				      GFP_USER);
	if (!page_virt) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR,
				"Cannot ecryptfs_kmalloc a page\n");
		goto out;
	}
	crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
	if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
		ecryptfs_set_default_sizes(crypt_stat);
	if (!ecryptfs_inode_to_private(dentry->d_inode)->lower_file) {
		rc = ecryptfs_init_persistent_file(dentry);
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to initialize "
			       "the persistent file for the dentry with name "
			       "[%s]; rc = [%d]\n", __func__,
			       dentry->d_name.name, rc);
			goto out;
		}
	}
	rc = ecryptfs_read_and_validate_header_region(page_virt,
						      dentry->d_inode);
	if (rc) {
		rc = ecryptfs_read_and_validate_xattr_region(page_virt, dentry);
		if (rc) {
			printk(KERN_DEBUG "Valid metadata not found in header "
			       "region or xattr region; treating file as "
			       "unencrypted\n");
			rc = 0;
			kmem_cache_free(ecryptfs_header_cache_2, page_virt);
			goto out;
		}
		crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
	}
	mount_crypt_stat = &ecryptfs_superblock_to_private(
		dentry->d_sb)->mount_crypt_stat;
	if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
			file_size = (crypt_stat->num_header_bytes_at_front
				     + i_size_read(lower_dentry->d_inode));
		else
			file_size = i_size_read(lower_dentry->d_inode);
	} else {
		file_size = get_unaligned_be64(page_virt);
	}
	i_size_write(dentry->d_inode, (loff_t)file_size);
	kmem_cache_free(ecryptfs_header_cache_2, page_virt);
	goto out;

out_dput:
	dput(lower_dentry);
	d_drop(dentry);
out:
	return ERR_PTR(rc);
}
Example #4
/**
 * ecryptfs_lookup_interpose - Dentry interposition for a lookup
 */
static int ecryptfs_lookup_interpose(struct dentry *dentry,
				     struct dentry *lower_dentry,
				     struct inode *dir_inode)
{
	struct inode *inode, *lower_inode = lower_dentry->d_inode;
	struct ecryptfs_dentry_info *dentry_info;
	struct vfsmount *lower_mnt;
	int rc = 0;

	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
	fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode);
	BUG_ON(!lower_dentry->d_count);

	dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
	ecryptfs_set_dentry_private(dentry, dentry_info);
	if (!dentry_info) {
		printk(KERN_ERR "%s: Out of memory whilst attempting "
		       "to allocate ecryptfs_dentry_info struct\n",
			__func__);
		dput(lower_dentry);
		mntput(lower_mnt);
		d_drop(dentry);
		return -ENOMEM;
	}
	ecryptfs_set_dentry_lower(dentry, lower_dentry);
	ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);

	if (!lower_dentry->d_inode) {
		/* We want to add because we couldn't find in lower */
		d_add(dentry, NULL);
		return 0;
	}
	inode = __ecryptfs_get_inode(lower_inode, dir_inode->i_sb);
	if (IS_ERR(inode)) {
		printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n",
		       __func__, PTR_ERR(inode));
		return PTR_ERR(inode);
	}
	if (S_ISREG(inode->i_mode)) {
		rc = ecryptfs_i_size_read(dentry, inode);
		if (rc) {
			make_bad_inode(inode);
			return rc;
		}
	}

#ifdef CONFIG_SDP
	if (S_ISDIR(inode->i_mode) && dentry) {
	    if(IS_UNDER_ROOT(dentry)) {
	        struct ecryptfs_mount_crypt_stat *mount_crypt_stat  =
	                &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
	        printk("Lookup a directoy under root directory of current partition.\n");

	        if(is_chamber_directory(mount_crypt_stat, (char *)dentry->d_name.name)) {
	            /*
	             * When this directory is under ROOT directory and the name is registered
	             * as Chamber.
	             */
	            printk("This is a chamber directory\n");
	            set_chamber_flag(inode);
	        }
	    } else if(IS_SENSITIVE_DENTRY(dentry->d_parent)) {
	        /*
	         * When parent directory is sensitive
	         */
	        struct ecryptfs_crypt_stat *crypt_stat =
	                &ecryptfs_inode_to_private(inode)->crypt_stat;

	        printk("Parent %s is sensitive. so this directory is sensitive too\n",
	                dentry->d_parent->d_name.name);
	        crypt_stat->flags |= ECRYPTFS_DEK_IS_SENSITIVE;
	    }
	}
#endif

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	d_add(dentry, inode);

	return rc;
}
Example #5
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT)
					goto next;
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
Example #6
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
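
mempool_alloc_slab is normally registered together with the kernel's matching
mempool_free_slab via mempool_create(). The sketch below shows that wiring; the
cache name, object size, and pool depth are illustrative assumptions, not taken
from any of the examples on this page.

void mempool_free_slab(void *element, void *pool_data)
{
	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
	kmem_cache_free(mem, element);
}

/* illustrative names and sizes only */
static kmem_cache_t *io_cachep;
static mempool_t *io_pool;

static int __init io_pool_init(void)
{
	io_cachep = kmem_cache_create("io_obj", 128, 0, 0, NULL, NULL);
	if (!io_cachep)
		return -ENOMEM;
	/* keep at least 4 objects reserved; pool_data is the backing cache */
	io_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
				 io_cachep);
	if (!io_pool) {
		kmem_cache_destroy(io_cachep);
		return -ENOMEM;
	}
	return 0;
}
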
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
}
Example #8
struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}
Example #9
/*
 * Allocate element for mempools requiring GFP_DMA flag.
 * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
 */
static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
}
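
A hedged sketch of how an allocator like fnic_alloc_slab_dma would be paired with
mempool_free_slab when the pool is created; the names below are assumptions, and
the slab cache is assumed to have been created with SLAB_CACHE_DMA so that its
objects come from DMA-able memory.

/* assumed names; the DMA-capable cache is created elsewhere with SLAB_CACHE_DMA */
static struct kmem_cache *fnic_sgl_cache;
static mempool_t *fnic_sgl_pool;

static int fnic_sgl_pool_create(void)
{
	fnic_sgl_pool = mempool_create(2, fnic_alloc_slab_dma,
				       mempool_free_slab, fnic_sgl_cache);
	return fnic_sgl_pool ? 0 : -ENOMEM;
}
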
Example #10
/*
 * Decodes share information in an nvlist format into a smb_kshare_t
 * structure.
 *
 * This is a temporary function and will be replaced by functions
 * provided by libsharev2 code after it's available.
 */
static smb_kshare_t *
smb_kshare_decode(nvlist_t *share)
{
	smb_kshare_t tmp;
	smb_kshare_t *shr;
	nvlist_t *smb;
	char *csc_name = NULL;
	int rc;

	ASSERT(share);

	bzero(&tmp, sizeof (smb_kshare_t));

	rc = nvlist_lookup_string(share, "name", &tmp.shr_name);
	rc |= nvlist_lookup_string(share, "path", &tmp.shr_path);
	(void) nvlist_lookup_string(share, "desc", &tmp.shr_cmnt);

	ASSERT(tmp.shr_name && tmp.shr_path);

	rc |= nvlist_lookup_nvlist(share, "smb", &smb);
	if (rc != 0) {
		cmn_err(CE_WARN, "kshare: failed looking up SMB properties"
		    " (%d)", rc);
		return (NULL);
	}

	rc = nvlist_lookup_uint32(smb, "type", &tmp.shr_type);
	if (rc != 0) {
		cmn_err(CE_WARN, "kshare[%s]: failed getting the share type"
		    " (%d)", tmp.shr_name, rc);
		return (NULL);
	}

	(void) nvlist_lookup_string(smb, SHOPT_AD_CONTAINER,
	    &tmp.shr_container);
	(void) nvlist_lookup_string(smb, SHOPT_NONE, &tmp.shr_access_none);
	(void) nvlist_lookup_string(smb, SHOPT_RO, &tmp.shr_access_ro);
	(void) nvlist_lookup_string(smb, SHOPT_RW, &tmp.shr_access_rw);

	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_ABE, SMB_SHRF_ABE);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CATIA,
	    SMB_SHRF_CATIA);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_GUEST,
	    SMB_SHRF_GUEST_OK);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_DFSROOT,
	    SMB_SHRF_DFSROOT);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, "Autohome",
	    SMB_SHRF_AUTOHOME);

	if ((tmp.shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME) {
		rc = nvlist_lookup_uint32(smb, "uid", &tmp.shr_uid);
		rc |= nvlist_lookup_uint32(smb, "gid", &tmp.shr_gid);
		if (rc != 0) {
			cmn_err(CE_WARN, "kshare: failed looking up uid/gid"
			    " (%d)", rc);
			return (NULL);
		}
	}

	(void) nvlist_lookup_string(smb, SHOPT_CSC, &csc_name);
	smb_kshare_csc_flags(&tmp, csc_name);

	shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
	bzero(shr, sizeof (smb_kshare_t));

	shr->shr_magic = SMB_SHARE_MAGIC;
	shr->shr_refcnt = 1;

	shr->shr_name = smb_mem_strdup(tmp.shr_name);
	shr->shr_path = smb_mem_strdup(tmp.shr_path);
	if (tmp.shr_cmnt)
		shr->shr_cmnt = smb_mem_strdup(tmp.shr_cmnt);
	if (tmp.shr_container)
		shr->shr_container = smb_mem_strdup(tmp.shr_container);
	if (tmp.shr_access_none)
		shr->shr_access_none = smb_mem_strdup(tmp.shr_access_none);
	if (tmp.shr_access_ro)
		shr->shr_access_ro = smb_mem_strdup(tmp.shr_access_ro);
	if (tmp.shr_access_rw)
		shr->shr_access_rw = smb_mem_strdup(tmp.shr_access_rw);

	shr->shr_oemname = smb_kshare_oemname(shr->shr_name);
	shr->shr_flags = tmp.shr_flags | smb_kshare_is_admin(shr->shr_name);
	shr->shr_type = tmp.shr_type | smb_kshare_is_special(shr->shr_name);

	shr->shr_uid = tmp.shr_uid;
	shr->shr_gid = tmp.shr_gid;

	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME)
		shr->shr_autocnt = 1;

	return (shr);
}
Example #11
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}
/*
 * Mark a range of blocks as belonging to the "system zone" --- that
 * is, filesystem metadata blocks which should never be used by
 * inodes.
 */
static int add_system_zone(struct ext4_sb_info *sbi,
			   ext4_fsblk_t start_blk,
			   unsigned int count)
{
	struct ext4_system_zone *new_entry = NULL, *entry;
	struct rb_node **n = &sbi->system_blks.rb_node, *node;
	struct rb_node *parent = NULL, *new_node = NULL;

	while (*n) {
		parent = *n;
		entry = rb_entry(parent, struct ext4_system_zone, node);
		if (start_blk < entry->start_blk)
			n = &(*n)->rb_left;
		else if (start_blk >= (entry->start_blk + entry->count))
			n = &(*n)->rb_right;
		else {
			if (start_blk + count > (entry->start_blk + 
						 entry->count))
				entry->count = (start_blk + count - 
						entry->start_blk);
			new_node = *n;
			new_entry = rb_entry(new_node, struct ext4_system_zone,
					     node);
			break;
		}
	}

	if (!new_entry) {
		new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
					     GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		new_entry->start_blk = start_blk;
		new_entry->count = count;
		new_node = &new_entry->node;

		rb_link_node(new_node, parent, n);
		rb_insert_color(new_node, &sbi->system_blks);
	}

	/* Can we merge to the left? */
	node = rb_prev(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_system_zone, node);
		if (can_merge(entry, new_entry)) {
			new_entry->start_blk = entry->start_blk;
			new_entry->count += entry->count;
			rb_erase(node, &sbi->system_blks);
			kmem_cache_free(ext4_system_zone_cachep, entry);
		}
	}

	/* Can we merge to the right? */
	node = rb_next(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_system_zone, node);
		if (can_merge(new_entry, entry)) {
			new_entry->count += entry->count;
			rb_erase(node, &sbi->system_blks);
			kmem_cache_free(ext4_system_zone_cachep, entry);
		}
	}
	return 0;
}
Example #13
struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
Example #14
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;

	__le16 fc;
	u8 hdr_len;
	u16 len;
	u8 sta_id;
	unsigned long flags;
	bool is_agg = false;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	if (iwl_is_rfkill(priv->shrd)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		is_agg = true;

	/* irqs already disabled/saved above when locking priv->shrd->lock */
	spin_lock(&priv->shrd->sta_lock);

	dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);

	if (unlikely(!dev_cmd))
		goto drop_unlock_sta;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;

	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
		goto drop_unlock_sta;

	spin_unlock(&priv->shrd->sta_lock);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
	spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	return -1;
}
Example #15
void
i686_kmain(unsigned long magic, multiboot_info_t *info) {

  bootvideo_cls();

  parse_cmdline(info->cmdline);

  if (use_serial)
    i686_tty_init(0, 9600);

  i686_kernel.debug = i686_debug;

  if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
    i686_debug("Not booted from multiboot loader!\n");
    while (1);
  }
 
  i686_debug("mods_addr: %x\nmod_start: %x\n", info->mods_addr,
      0);

  i686_kernel.mutex = &i686_mutex;
  i686_kernel.bsp = (struct cpu *)i686_cpu_alloc();
  i686_kernel.bsp->kvirt = i686_virtmem_init(&i686_kernel);
  i686_kernel.phys = i686_physmem_alloc(&i686_kernel, info);

  kmem_init(i686_kernel.bsp->allocator);
  i686_kernel.bsp->v.init(i686_kernel.bsp);

  i686_debug("Location GDT entry: %x\n", ((struct i686_cpu *)i686_kernel.bsp)->gdt);

  virtaddr_t a;
  physaddr_t p;
  virtmem_error_t e1 = virtmem_kernel_alloc(i686_kernel.bsp->kvirt, &a, 1);
  assert(e1 == VIRTMEM_SUCCESS);
  physmem_error_t e2 = physmem_page_alloc(i686_kernel.bsp->localmem, 0, &p);
  assert(e2 == PHYSMEM_SUCCESS);
  virtmem_kernel_map_virt_to_phys(i686_kernel.bsp->kvirt, p, a);
  i686_debug("Allocated address: %x(->%x)\n", a, p);

  char *s = (char *)a;

  strcpy(s, "This shows the validity of this memory");
  i686_debug("%x contains: %s\n", a, s);

  struct kmem_cache *s1 = kmem_alloc(i686_kernel.bsp->allocator);
  kmem_cache_init(i686_kernel.bsp->allocator,
      s1, i686_kernel.bsp, "test", 128, NULL, NULL);

  char *t1 = kmem_cache_alloc(s1);
  i686_debug("cache at %x provided us with %x\n", s1, t1);
  strcpy(t1, "This shows the validity of the slab allocation");
  i686_debug("%x contains: %s\n", t1, t1);

  i686_address_space_init();
  struct address_space *as;
  struct memory_region *mr;
  address_space_alloc(&as);
  memory_region_alloc(&mr);

  e1 = virtmem_kernel_alloc(i686_kernel.bsp->kvirt, &a, 1);
  virtmem_kernel_map_virt_to_phys(i686_kernel.bsp->kvirt, (physaddr_t)as->pd, a);
  
  address_space_init_region(as, mr, (virtaddr_t)0x1000000, 0x2000);
  memory_region_set_flags(mr, 1, 1);
  memory_region_map(as, mr, NULL);

  const char *teststr = "This is a test string to be copied to userspace.";
  char testcpybuf[128];
  char opcodes[] = {0xeb, 0xfe};
  virtmem_copy_kernel_to_user(i686_kernel.bsp->kvirt, as->pd, (void *)0x1000ffc, 
      (const void *)teststr, strlen(teststr) + 1);
  virtmem_copy_user_to_kernel(i686_kernel.bsp->kvirt, (void *)&testcpybuf, 
      as->pd, (const void *)0x1000ffc, strlen(teststr) + 1);
  i686_debug("testcpybuf contains '%s'\n", testcpybuf);
  virtmem_copy_kernel_to_user(i686_kernel.bsp->kvirt, as->pd, (void *)0x1000000, 
      (const void *)opcodes, 2);


  struct thread *thr1;
  scheduler_thread_alloc(cpu()->sched, &thr1);
  thread_init(thr1, as);
  thr1->state = THREAD_RUNNABLE;
  scheduler_thread_add(cpu()->sched, thr1);
  scheduler_reschedule(cpu()->sched);
  virtmem_user_setup_kernelspace(i686_kernel.bsp->kvirt, as->pd);
  virtmem_set_context(i686_kernel.bsp->kvirt, as->pd);
  scheduler_resume(cpu()->sched);
  while (1);
}
Example #16
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter
 * @vid_hdr: the volume identifier header
 * @bitflips: if bit-flips were detected when this physical eraseblock was read
 *
 * This function adds information about a used physical eraseblock to the
 * 'used' tree of the corresponding volume. The function is rather complex
 * because it has to handle cases when this is not the first physical
 * eraseblock belonging to the same logical eraseblock, and the newer one has
 * to be picked, while the older one has to be dropped. This function returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
	 * if this is the first instance of this logical eraseblock or not.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * There is already a physical eraseblock describing the same
		 * logical eraseblock present.
		 */

		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Make sure that the logical eraseblocks have different
		 * sequence numbers. Otherwise the image is bad.
		 *
		 * However, if the sequence number is zero, we assume it must
		 * be an ancient UBI image from the era when UBI did not have
		 * sequence numbers. We still can attach these images, unless
		 * there is a need to distinguish between old and new
		 * eraseblocks, in which case we'll refuse the image in
		 * 'ubi_compare_lebs()'. In other words, we attach old clean
		 * images, but refuse attaching old images with duplicated
		 * logical eraseblocks because there was an unclean reboot.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err(ubi, "two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * Now we have to drop the older one and preserve the newer
		 * one.
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This logical eraseblock is newer than the one
			 * found earlier.
			 */
			err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
			if (err)
				return err;

			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
					  aeb->lnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			aeb->ec = ec;
			aeb->pnum = pnum;
			aeb->vol_id = vol_id;
			aeb->lnum = lnum;
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		} else {
			/*
			 * This logical eraseblock is older than the one found
			 * previously.
			 */
			return add_to_list(ai, pnum, vol_id, lnum, ec,
					   cmp_res & 4, &ai->erase);
		}
	}

	/*
	 * We've met this logical eraseblock for the first time, add it to the
	 * attaching information.
	 */

	err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
Example #17
/*
 * vq_repbuf_alloc - allocate a VQ Reply Buffer.
 */
void *vq_repbuf_alloc(struct c2_dev *c2dev)
{
	return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
}
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
	struct dnotify_struct *dn;
	struct dnotify_struct *odn;
	struct dnotify_struct **prev;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
	int error = 0;

	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
		return 0;
	}
	if (!dir_notify_enable)
		return -EINVAL;
	inode = filp->f_path.dentry->d_inode;
	if (!S_ISDIR(inode->i_mode))
		return -ENOTDIR;
	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
	if (dn == NULL)
		return -ENOMEM;
	spin_lock(&inode->i_lock);
	prev = &inode->i_dnotify;
	while ((odn = *prev) != NULL) {
		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
			odn->dn_fd = fd;
			odn->dn_mask |= arg;
			inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
			goto out_free;
		}
		prev = &odn->dn_next;
	}

	rcu_read_lock();
	f = fcheck(fd);
	rcu_read_unlock();
	/* we'd lost the race with close(), sod off silently */
	/* note that inode->i_lock prevents reordering problems
	 * between accesses to descriptor table and ->i_dnotify */
	if (f != filp)
		goto out_free;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	if (error)
		goto out_free;

	dn->dn_mask = arg;
	dn->dn_fd = fd;
	dn->dn_filp = filp;
	dn->dn_owner = id;
	inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
	dn->dn_next = inode->i_dnotify;
	inode->i_dnotify = dn;
	spin_unlock(&inode->i_lock);

	if (filp->f_op && filp->f_op->dir_notify)
		return filp->f_op->dir_notify(filp, arg);
	return 0;

out_free:
	spin_unlock(&inode->i_lock);
	kmem_cache_free(dn_cache, dn);
	return error;
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
}
Example #20
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}
Example #21
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;
	sig->tty = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	sig->prev_utime = sig->prev_stime = cputime_zero;
#endif
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	sig->maxrss = sig->cmaxrss = 0;
	task_io_accounting_init(&sig->ioac);
	sig->sum_sched_runtime = 0;
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);

	sig->oom_adj = current->signal->oom_adj;

	return 0;
}
Example #22
    .owner = THIS_MODULE,
    .llseek = dispatch_llseek,
    .read = dispatch_read,
    .unlocked_ioctl = dispatch_ioctl,
    .open = phys_mem_open,
    .release = phys_mem_release,
    .mmap = dispatch_mmap,
};

First, let's look at the open function, phys_mem_open. Its main job is to create a
session, initialize it, and associate that session with filp. The core code is as follows:

    dev = container_of(inode->i_cdev, struct phys_mem_dev, cdev);

	/* allocate a session from the slab allocator */
    session = kmem_cache_alloc(session_mem_cache, GFP_KERNEL);
	...	
	/* initialize the session */
    session->session_id = atomic64_add_return(1, &session_counter);
    session->device = dev;
    sema_init(&session->sem, 1);
    session->vmas = 0;
    session->num_frame_stati = 0;
    session->frame_stati = NULL;
    session->status.state = SESSION_STATE_INVALID;

    SET_STATE(session, SESSION_STATE_OPEN);

    /* associate filp with the session */
    filp->private_data = session;
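
A minimal sketch of the slab-cache lifecycle implied by this walkthrough: the cache
is created once at module init, and every session is handed back to it in release.
Apart from session_mem_cache and phys_mem_release, which appear in the excerpt, the
struct and function names here are assumptions.

/* assumed type and init hook; only session_mem_cache/phys_mem_release come from the excerpt */
static struct kmem_cache *session_mem_cache;

static int __init phys_mem_module_init(void)
{
	session_mem_cache = kmem_cache_create("phys_mem_session",
					      sizeof(struct phys_mem_session),
					      0, 0, NULL);
	return session_mem_cache ? 0 : -ENOMEM;
}

static int phys_mem_release(struct inode *inode, struct file *filp)
{
	struct phys_mem_session *session = filp->private_data;

	/* hand the session object back to the slab allocator */
	kmem_cache_free(session_mem_cache, session);
	filp->private_data = NULL;
	return 0;
}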
Example #23
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len;
			len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			vma_get_file(tmp);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
Example #24
/*
 * function to xmit  Single packet over the wire
 *
 * wq - pointer to WQ
 * mp - Pointer to packet chain
 *
 * return pointer to the packet
 */
mblk_t *
oce_send_packet(struct oce_wq *wq, mblk_t *mp)
{
	struct oce_nic_hdr_wqe *wqeh;
	struct oce_dev *dev;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int32_t num_wqes;
	uint16_t etype;
	uint32_t ip_offset;
	uint32_t csum_flags = 0;
	boolean_t use_copy = B_FALSE;
	boolean_t tagged   = B_FALSE;
	uint16_t  vlan_tag;
	uint32_t  reg_value = 0;
	oce_wqe_desc_t *wqed = NULL;
	mblk_t *nmp = NULL;
	mblk_t *tmp = NULL;
	uint32_t pkt_len = 0;
	int num_mblks = 0;
	int ret = 0;
	uint32_t mss = 0;
	uint32_t flags = 0;
	int len = 0;

	/* retrieve the adap priv struct ptr */
	dev = wq->parent;

	/* check if we have enough free slots */
	if (wq->wq_free < dev->tx_reclaim_threshold) {
		(void) oce_process_tx_compl(wq, B_FALSE);
	}
	if (wq->wq_free < OCE_MAX_TX_HDL) {
		return (mp);
	}

	/* check if we should copy */
	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
		pkt_len += MBLKL(tmp);
		num_mblks++;
	}

	if (pkt_len == 0 || num_mblks == 0) {
		freemsg(mp);
		return (NULL);
	}

	/* retrieve LSO information */
	mac_lso_get(mp, &mss, &flags);

	/* get the offload flags */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);

	/* restrict the mapped segment to what we support */
	if (num_mblks  > OCE_MAX_TX_HDL) {
		nmp = msgpullup(mp, -1);
		if (nmp == NULL) {
			atomic_inc_32(&wq->pkt_drops);
			freemsg(mp);
			return (NULL);
		}
		/* Reset it to new collapsed mp */
		freemsg(mp);
		mp = nmp;
	}

	/* Get the packet descriptor for Tx */
	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
	if (wqed == NULL) {
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	eh = (struct ether_header *)(void *)mp->b_rptr;
	if (ntohs(eh->ether_type) == VLAN_TPID) {
		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
		tagged = B_TRUE;
		etype = ntohs(evh->ether_type);
		ip_offset = sizeof (struct ether_vlan_header);
		pkt_len -= VTAG_SIZE;
		vlan_tag = ntohs(evh->ether_tci);
		oce_remove_vtag(mp);
	} else {
		etype = ntohs(eh->ether_type);
		ip_offset = sizeof (struct ether_header);
	}

	/* Save the WQ pointer */
	wqed->wq = wq;
	wqed->frag_idx = 1; /* index zero is always header */
	wqed->frag_cnt = 0;
	wqed->nhdl = 0;
	wqed->mp = NULL;
	OCE_LIST_LINK_INIT(&wqed->link);

	/* If entire packet is less than the copy limit  just do copy */
	if (pkt_len < dev->tx_bcopy_limit) {
		use_copy = B_TRUE;
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	} else {
		/* copy or dma map the individual fragments */
		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
			len = MBLKL(nmp);
			if (len == 0) {
				continue;
			}
			if (len < dev->tx_bcopy_limit) {
				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
			} else {
				ret = oce_map_wqe(wq, wqed, nmp, len);
			}
			if (ret != 0)
				break;
		}
	}

	/*
	 * Any failure other than insufficient Q entries
	 * drop the packet
	 */
	if (ret != 0) {
		oce_free_wqed(wq, wqed);
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}

	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));

	/* fill rest of wqe header fields based on packet */
	if (flags & HW_LSO) {
		wqeh->u0.s.lso = B_TRUE;
		wqeh->u0.s.lso_mss = mss;
	}
	if (csum_flags & HCK_FULLCKSUM) {
		uint8_t *proto;
		if (etype == ETHERTYPE_IP) {
			proto = (uint8_t *)(void *)
			    (mp->b_rptr + ip_offset);
			if (proto[9] == 6)
				/* IPPROTO_TCP */
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (proto[9] == 17)
				/* IPPROTO_UDP */
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}

	if (csum_flags & HCK_IPV4_HDRCKSUM)
		wqeh->u0.s.ipcs = B_TRUE;
	if (tagged) {
		wqeh->u0.s.vlan = B_TRUE;
		wqeh->u0.s.vlan_tag = vlan_tag;
	}

	wqeh->u0.s.complete = B_TRUE;
	wqeh->u0.s.event = B_TRUE;
	wqeh->u0.s.crc = B_TRUE;
	wqeh->u0.s.total_length = pkt_len;

	num_wqes = wqed->frag_cnt + 1;

	/* h/w expects even no. of WQEs */
	if (num_wqes & 0x1) {
		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
		num_wqes++;
	}
	wqed->wqe_cnt = (uint16_t)num_wqes;
	wqeh->u0.s.num_wqe = num_wqes;
	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));

	mutex_enter(&wq->tx_lock);
	if (num_wqes > wq->wq_free) {
		atomic_inc_32(&wq->tx_deferd);
		mutex_exit(&wq->tx_lock);
		goto wqe_fail;
	}
	atomic_add_32(&wq->wq_free, -num_wqes);

	/* fill the wq for adapter */
	oce_fill_ring_descs(wq, wqed);

	/* Set the mp pointer in the wqe descriptor */
	if (use_copy == B_FALSE) {
		wqed->mp = mp;
	}
	/* Add the packet desc to list to be retrieved during cmpl */
	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list,  wqed);
	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/* ring tx doorbell */
	reg_value = (num_wqes << 16) | wq->wq_id;
	/* Ring the door bell  */
	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
	mutex_exit(&wq->tx_lock);
	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
	}

	/* free mp if copied or packet chain collapsed */
	if (use_copy == B_TRUE) {
		freemsg(mp);
	}
	return (NULL);

wqe_fail:

	if (tagged) {
		oce_insert_vtag(mp, vlan_tag);
	}
	oce_free_wqed(wq, wqed);
	return (mp);
} /* oce_send_packet */
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone)
{
	kmem_cache_t *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	//data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	//data = kmalloc(size + sizeof(struct skb_shared_info)+4, gfp_mask);//incifer support vlan id
    /* 2007/10/16 pppoe acc incifer    LAN2WAN */
    data = kmalloc(size + sizeof(struct skb_shared_info) + NK_EXTRA_OFFSET, gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	//skb->data = data+4;//incifer
/* 2007/10/16 pppoe acc incifer    LAN2WAN */
    skb->data = data + NK_EXTRA_OFFSET;
	//skb->tail = data+4;//incifer
/* 2007/10/16 pppoe acc incifer    LAN2WAN */
    skb->tail = data + NK_EXTRA_OFFSET;
	//skb->end  = data + size+4;//incifer
    /* 2007/10/16 pppoe acc incifer    LAN2WAN */
    skb->end  = data + size + NK_EXTRA_OFFSET;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags  = 0;
	shinfo->tso_size = 0;
	shinfo->tso_segs = 0;
	shinfo->ufo_size = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;

	uprobe_start_dup_mmap();
	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->total_vm = oldmm->total_vm;
	mm->shared_vm = oldmm->shared_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
							-vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			if (unlikely(tmp->vm_flags & VM_NONLINEAR))
				vma_nonlinear_insert(tmp,
						&mapping->i_mmap_nonlinear);
			else
				vma_interval_tree_insert_after(tmp, mpnt,
							&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
Example #27
File: dma.c Project: 08opt/linux
int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c2410_dma_buf *buf;
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: id=%p, data=%08x, size=%d\n",
		 __func__, id, (unsigned int)data, size);

	buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
	if (buf == NULL) {
		pr_debug("%s: out of memory (%ld alloc)\n",
			 __func__, (long)sizeof(*buf));
		return -ENOMEM;
	}

	//pr_debug("%s: new buffer %p\n", __func__, buf);
	//dbg_showchan(chan);

	buf->next  = NULL;
	buf->data  = buf->ptr = data;
	buf->size  = size;
	buf->id    = id;
	buf->magic = BUF_MAGIC;

	local_irq_save(flags);

	if (chan->curr == NULL) {
		/* we've got nothing loaded... */
		pr_debug("%s: buffer %p queued onto empty channel\n",
			 __func__, buf);

		chan->curr = buf;
		chan->end  = buf;
		chan->next = NULL;
	} else {
		pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
			 chan->number, __func__, buf);

		if (chan->end == NULL)
			pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
				 chan->number, __func__, chan);

		chan->end->next = buf;
		chan->end = buf;
	}

	/* if necessary, update the next buffer field */
	if (chan->next == NULL)
		chan->next = buf;

	/* check to see if we can load a buffer */
	if (chan->state == S3C2410_DMA_RUNNING) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) {
			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				printk(KERN_ERR "dma%d: loadbuffer:"
				       "timeout loading buffer\n",
				       chan->number);
				dbg_showchan(chan);
				local_irq_restore(flags);
				return -EINVAL;
			}
		}

		while (s3c2410_dma_canload(chan) && chan->next != NULL) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	} else if (chan->state == S3C2410_DMA_IDLE) {
		if (chan->flags & S3C2410_DMAF_AUTOSTART) {
			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
					 S3C2410_DMAOP_START);
		}
	}

	local_irq_restore(flags);
	return 0;
}
Example #28
/**
 * ecryptfs_lookup_and_interpose_lower - Perform a lookup
 */
int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
					struct dentry *lower_dentry,
					struct inode *ecryptfs_dir_inode,
					struct nameidata *ecryptfs_nd)
{
	struct dentry *lower_dir_dentry;
	struct vfsmount *lower_mnt;
	struct inode *lower_inode;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
	struct ecryptfs_crypt_stat *crypt_stat;
	char *page_virt = NULL;
	u64 file_size;
	int rc = 0;

	lower_dir_dentry = lower_dentry->d_parent;
	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
				   ecryptfs_dentry->d_parent));
	lower_inode = lower_dentry->d_inode;
	fsstack_copy_attr_atime(ecryptfs_dir_inode, lower_dir_dentry->d_inode);
	BUG_ON(!atomic_read(&lower_dentry->d_count));
	ecryptfs_set_dentry_private(ecryptfs_dentry,
				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
						     GFP_KERNEL));
	if (!ecryptfs_dentry_to_private(ecryptfs_dentry)) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Out of memory whilst attempting "
		       "to allocate ecryptfs_dentry_info struct\n",
			__func__);
		goto out_put;
	}
	ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry);
	ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt);
	if (!lower_dentry->d_inode) {
		/* We want to add because we couldn't find in lower */
		d_add(ecryptfs_dentry, NULL);
		goto out;
	}
	rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
				ecryptfs_dir_inode->i_sb, 1);
	if (rc) {
		printk(KERN_ERR "%s: Error interposing; rc = [%d]\n",
		       __func__, rc);
		goto out;
	}
	if (S_ISDIR(lower_inode->i_mode))
		goto out;
	if (S_ISLNK(lower_inode->i_mode))
		goto out;
	if (special_file(lower_inode->i_mode))
		goto out;
	if (!ecryptfs_nd)
		goto out;
	/* Released in this function */
	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
	if (!page_virt) {
		printk(KERN_ERR "%s: Cannot kmem_cache_zalloc() a page\n",
		       __func__);
		rc = -ENOMEM;
		goto out;
	}
	if (!ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->lower_file) {
		rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to initialize "
			       "the persistent file for the dentry with name "
			       "[%s]; rc = [%d]\n", __func__,
			       ecryptfs_dentry->d_name.name, rc);
			goto out_free_kmem;
		}
	}
	crypt_stat = &ecryptfs_inode_to_private(
					ecryptfs_dentry->d_inode)->crypt_stat;
	/* TODO: lock for crypt_stat comparison */
	if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
			ecryptfs_set_default_sizes(crypt_stat);
	rc = ecryptfs_read_and_validate_header_region(page_virt,
						      ecryptfs_dentry->d_inode);
	if (rc) {
		rc = ecryptfs_read_and_validate_xattr_region(page_virt,
							     ecryptfs_dentry);
		if (rc) {
			rc = 0;
			goto out_free_kmem;
		}
		crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
	}
	mount_crypt_stat = &ecryptfs_superblock_to_private(
		ecryptfs_dentry->d_sb)->mount_crypt_stat;
	if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
			file_size = (crypt_stat->num_header_bytes_at_front
				     + i_size_read(lower_dentry->d_inode));
		else
			file_size = i_size_read(lower_dentry->d_inode);
	} else {
		file_size = get_unaligned_be64(page_virt);
	}
	i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
out_free_kmem:
	kmem_cache_free(ecryptfs_header_cache_2, page_virt);
	goto out;
out_put:
	dput(lower_dentry);
	mntput(lower_mnt);
	d_drop(ecryptfs_dentry);
out:
	return rc;
}
Example #29
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
      
			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
Example #30
/*
 * void task_init(void)
 *
 * Overview
 *   task_init() initializes task-related hashes, caches, and the task id
 *   space.  Additionally, task_init() establishes p0 as a member of task0.
 *   Called by main().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   task_init() must be called prior to MP startup.
 */
void
task_init(void)
{
	proc_t *p = &p0;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	/*
	 * Initialize task_cache and taskid_space.
	 */
	task_cache = kmem_cache_create("task_cache", sizeof (task_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	taskid_space = id_space_create("taskid_space", 0, MAX_TASKID);

	/*
	 * Initialize task hash table.
	 */
	task_hash = mod_hash_create_idhash("task_hash", task_hash_size,
	    mod_hash_null_valdtor);

	/*
	 * Initialize task-based rctls.
	 */
	rc_task_lwps = rctl_register("task.max-lwps", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_lwps_ops);
	rc_task_nprocs = rctl_register("task.max-processes", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_procs_ops);
	rc_task_cpu_time = rctl_register("task.max-cpu-time", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_DENY_NEVER |
	    RCTL_GLOBAL_CPU_TIME | RCTL_GLOBAL_INFINITE |
	    RCTL_GLOBAL_UNOBSERVABLE | RCTL_GLOBAL_SECONDS, UINT64_MAX,
	    UINT64_MAX, &task_cpu_time_ops);

	/*
	 * Create task0 and place p0 in it as a member.
	 */
	task0p = kmem_cache_alloc(task_cache, KM_SLEEP);
	bzero(task0p, sizeof (task_t));

	task0p->tk_tkid = id_alloc(taskid_space);
	task0p->tk_usage = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_proj = project_hold_by_id(0, &zone0,
	    PROJECT_HOLD_INSERT);
	task0p->tk_flags = TASK_NORMAL;
	task0p->tk_nlwps = p->p_lwpcnt;
	task0p->tk_nprocs = 1;
	task0p->tk_zone = global_zone;
	task0p->tk_commit_next = NULL;

	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_TASK);
	mutex_enter(&curproc->p_lock);
	e.rcep_p.task = task0p;
	e.rcep_t = RCENTITY_TASK;
	task0p->tk_rctls = rctl_set_init(RCENTITY_TASK, curproc, &e, set, gp);
	mutex_exit(&curproc->p_lock);
	rctl_prealloc_destroy(gp);

	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(task0p->tk_tkid, GLOBAL_ZONEID) == NULL);
	if (mod_hash_insert_reserve(task_hash,
	    (mod_hash_key_t)(uintptr_t)task0p->tk_tkid,
	    (mod_hash_val_t *)task0p, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", task0p->tk_tkid,
		    (void *)task0p);
	}
	mutex_exit(&task_hash_lock);

	task0p->tk_memb_list = p;

	task0p->tk_nprocs_kstat = task_kstat_create(task0p, task0p->tk_zone);

	/*
	 * Initialize task pointers for p0, including doubly linked list of task
	 * members.
	 */
	p->p_task = task0p;
	p->p_taskprev = p->p_tasknext = p;
	task_hold(task0p);
}