Example #1
/**
 * Find first fit block
 **/
block* firstFit(int size)
{
	int minSize = sizeof(block);
	if (size < minSize)
		size = minSize;
	pageHeader* header = (pageHeader*)(mainPage->ptr);
	block* cursor = (block*)(header->head);

	while (cursor)
	{
		// not enough space in this block
		if (cursor->size < size)
		{
			cursor = cursor->next;
			continue;
		}
		// perfect fit
		else if (cursor->size == size || (cursor->size - size) < minSize)
		{
			deleteEntry(cursor);
			return (void*)cursor;
		}
		// fits; put the leftover fragment back on the free list
		else
		{
			addEntry((void*)((long int)cursor + size), (cursor->size - size));
			deleteEntry(cursor);
			return (void*)cursor;
		}
	}
	// not enough space anywhere; add a new page and retry
	kma_page_t* newPage = get_page();
	initPage(newPage);
	header->pageCount++;
	return firstFit(size);
}
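For orientation, the snippet above only shows the search routine; the block below is a hypothetical caller sketch (the name kma_malloc, the header bookkeeping, and the payload layout are assumptions, not part of the original code) showing how a first-fit free-list allocator like this is typically driven.

/* Hypothetical caller sketch: only firstFit() and block come from the code above. */
void* kma_malloc(int size)
{
	/* reserve room for a block header in front of the user payload */
	block* b = firstFit(size + sizeof(block));
	b->size = size + sizeof(block);	/* remember the size so a later free can re-add the block */
	return (void*)(b + 1);		/* the payload starts right after the header */
}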
int virtualMemoryFault(struct vm_area_struct * _vma, struct vm_fault * _vmf)
{
    struct page *page;
    unsigned long offset;
    void *page_ptr;

    printk("\nPage fault by OSP");
    printk("\n\tVirtual Memory Falut Offset: %lx", _vmf->pgoff);
    printk("\n\tVirtual Memory Fault Virtual Address: %p", _vmf->virtual_address);

    printk("\n\tstart: %lx", _vma->vm_start);
    printk("\n\tend: %lx", _vma->vm_end);
    printk("\n\tVirtual Memory offset: %lx", _vma->vm_pgoff);

    page_ptr = NULL;
    //__va -> Convert virtual to physical memory address.
    if((NULL == _vma) || (NULL == share_mem))
    {
        printk("return VM_FAULT_SIGBUS!\n");
        return VM_FAULT_SIGBUS;
    }

    offset = (unsigned long)_vmf->virtual_address - _vma->vm_start;
    if(offset >= PAGE_SIZE)
    {
        printk("return VM_FAULT_SIGBUS!");
        return VM_FAULT_SIGBUS;
    }

    page_ptr = share_mem + offset;
    page = vmalloc_to_page(page_ptr);
    get_page(page);
    _vmf->page = page;

    return 0;
}
/* nopage is called the first time a memory area is accessed which is not in memory,
 * it does the actual mapping between kernel and user space memory
 */
int autok_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    unsigned long offset;
    struct log_mmap_info *dev = vma->vm_private_data;
    int result = VM_FAULT_SIGBUS;
    struct page *page;
    //void *pageptr = NULL; /* default to "missing" */ 
    pgoff_t pgoff = vmf->pgoff;  
    offset = (pgoff << PAGE_SHIFT) + (vma->vm_pgoff << PAGE_SHIFT);
    if (!dev->data) {
        printk("no data\n");
        return VM_FAULT_SIGBUS;
    }
    if (offset >= dev->size)
        return VM_FAULT_SIGBUS;
    //offset >>= PAGE_SHIFT; /* offset is a number of pages */  
    /* got it, now convert pointer to a struct page and increment the count */
    page = virt_to_page(&dev->data[offset]);
    get_page(page);
    vmf->page = page;
    result = 0;
    return result;

}
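For context, a .fault handler like the two above only takes effect once it is installed through a vm_operations_struct by the driver's mmap method. The sketch below is an assumption-based illustration (the autok_mmap function and the use of filp->private_data are hypothetical; only autok_mmap_fault is taken from the example) of how that wiring usually looks on kernels with the older (vma, vmf) fault signature.

/* Hypothetical wiring sketch -- autok_mmap and the private_data convention are assumed. */
static struct vm_operations_struct autok_vm_ops = {
	.fault = autok_mmap_fault,	/* invoked on the first access to an unmapped page */
};

static int autok_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &autok_vm_ops;
	vma->vm_private_data = filp->private_data;	/* struct log_mmap_info * expected by the handler */
	return 0;
}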
int appfs_mem_writepage(const devfs_handle_t * handle, void * ctl){
	DECLARE_APPFS_CONFIG();
	mem_writepage_t * write_page_info = ctl;
	u32 type;
	int result;
	get_page(config, (u32)write_page_info->addr, (u32)write_page_info->nbyte, &type);

	if( type == 0 ){ return SYSFS_SET_RETURN(EINVAL); }

	if( type & MEM_FLAG_IS_RAM ){
		//check to see if the memcpy fits in RAM
		memcpy((void*)write_page_info->addr, write_page_info->buf, write_page_info->nbyte);
		result = write_page_info->nbyte;
	} else {
		if( config->flash_driver != 0 ){
			result = config->flash_driver->driver.ioctl(&config->flash_driver->handle, I_FLASH_WRITEPAGE, ctl);
		} else {
			return SYSFS_SET_RETURN(ENOTSUP);
		}
	}

	return result;
}
Example #5
static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
					     unsigned long address,
					     int *type)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	dma_addr_t dma_addr;
	unsigned long pa;
	struct page *page;

	dma_addr = address - vma->vm_start + agp->aperture.bus_base;
	pa = agp->ops->translate(agp, dma_addr);

	if (pa == (unsigned long)-EINVAL)
		return NULL;	/* no translation */

	/*
	 * Get the page, inc the use count, and return it
	 */
	page = virt_to_page(__va(pa));
	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
Example #6
static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct uio_device *idev = vma->vm_private_data;
    struct page *page;
    unsigned long offset;

    int mi = uio_find_mem_index(vma);
    if (mi < 0)
        return VM_FAULT_SIGBUS;

    /*
     * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
     * to use mem[N].
     */
    offset = (vmf->pgoff - mi) << PAGE_SHIFT;

    if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
        page = virt_to_page(idev->info->mem[mi].addr + offset);
    else
        page = vmalloc_to_page((void *)(unsigned long)idev->info->mem[mi].addr + offset);
    get_page(page);
    vmf->page = page;
    return 0;
}
Example #7
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
#ifdef CONFIG_X86
    p2m_type_t p2mt;
#endif
    unsigned long mfn;

#ifdef CONFIG_X86
    mfn = mfn_x(get_gfn_query(d, gmfn, &p2mt)); 
    if ( unlikely(p2m_is_paging(p2mt)) )
    {
        guest_physmap_remove_page(d, gmfn, mfn, 0);
        put_gfn(d, gmfn);
        /* If the page hasn't yet been paged out, there is an
         * actual page that needs to be released. */
        if ( p2mt == p2m_ram_paging_out )
        {
            ASSERT(mfn_valid(mfn));
            page = mfn_to_page(mfn);
            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                put_page(page);
        }
        p2m_mem_paging_drop_page(d, gmfn, p2mt);
        return 1;
    }
    if ( p2mt == p2m_mmio_direct )
    {
        clear_mmio_p2m_entry(d, gmfn, _mfn(mfn));
        put_gfn(d, gmfn);
        return 1;
    }
#else
    mfn = gmfn_to_mfn(d, gmfn);
#endif
    if ( unlikely(!mfn_valid(mfn)) )
    {
        put_gfn(d, gmfn);
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                d->domain_id, gmfn);
        return 0;
    }
            
#ifdef CONFIG_X86
    if ( p2m_is_shared(p2mt) )
    {
        /* Unshare the page, bail out on error. We unshare because 
         * we might be the only one using this shared page, and we
         * need to trigger proper cleanup. Once done, this is 
         * like any other page. */
        if ( mem_sharing_unshare_page(d, gmfn, 0) )
        {
            put_gfn(d, gmfn);
            (void)mem_sharing_notify_enomem(d, gmfn, 0);
            return 0;
        }
        /* Maybe the mfn changed */
        mfn = mfn_x(get_gfn_query_unlocked(d, gmfn, &p2mt));
        ASSERT(!p2m_is_shared(p2mt));
    }
#endif /* CONFIG_X86 */

    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
    {
        put_gfn(d, gmfn);
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);

    /*
     * With the lack of an IOMMU on some platforms, domains with DMA-capable
     * device must retrieve the same pfn when the hypercall populate_physmap
     * is called.
     *
     * For this purpose (and to match populate_physmap() behavior), the page
     * is kept allocated.
     */
    if ( !is_domain_direct_mapped(d) &&
         test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn, 0);

    put_page(page);
    put_gfn(d, gmfn);

    return 1;
}
Example #8
int j4fs_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
    struct inode *inode;
    unsigned long end_index;
    char *buffer;
    int nWritten = 0;
    unsigned nBytes;
    j4fs_ctrl ctl;
    int nErr;

    if(j4fs_panic==1) {
        J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: j4fs panic\n",__FUNCTION__,__LINE__));
        return -ENOSPC;
    }

    J4FS_T(J4FS_TRACE_FS,("%s %d\n",__FUNCTION__,__LINE__));

    if (!mapping) BUG();

    inode = mapping->host;

    if (!inode) BUG();

    if (offset > inode->i_size) {
        J4FS_T(J4FS_TRACE_FS,
               ("j4fs_writepage at %08x, inode size = %08x!!!\n",
                (unsigned)(page->index << PAGE_CACHE_SHIFT),
                (unsigned)inode->i_size));
        J4FS_T(J4FS_TRACE_FS,
               ("                -> don't care!!\n"));
        unlock_page(page);
        return 0;
    }

    end_index = inode->i_size >> PAGE_CACHE_SHIFT;

    /* easy case */
    if (page->index < end_index)
        nBytes = PAGE_CACHE_SIZE;
    else
        nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);

    get_page(page);

    buffer = kmap(page);

    j4fs_GrossLock();

    J4FS_T(J4FS_TRACE_FS,
           ("j4fs_writepage: index=%08x,nBytes=%08x,inode.i_size=%05x\n", (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes,(int)inode->i_size));

    // write file
    ctl.buffer=buffer;
    ctl.count=nBytes;
    ctl.id=inode->i_ino;
    ctl.index=offset;

    nErr=fsd_write(&ctl);

    if(nErr==J4FS_RETRY_WRITE) nErr=fsd_write(&ctl);

    J4FS_T(J4FS_TRACE_FS,
           ("j4fs_writepage: index=%08x,nBytes=%08x,inode.i_size=%05x\n", (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes,(int)inode->i_size));

    j4fs_GrossUnlock();

    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    put_page(page);

    return (nWritten == nBytes) ? 0 : -ENOSPC;

}
/*
 * Identify large copies from remotely-cached memory, and copy them
 * via memcpy_multicache() if they look good, otherwise fall back
 * to the particular kind of copying passed as the memcpy_t function.
 */
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(pte_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = pte_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(pte_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = pte_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	return func(dest, source, len);
}
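fast_copy() above is a wrapper layer: it peels off large, remotely-homed spans with memcpy_multicache() and hands whatever remains to the memcpy_t callback it was given. The block below is a hypothetical fallback sketch; the wrapper name, the plain-memcpy body, and the assumed shape of memcpy_t (void *, const void *, int length, returning the bytes left uncopied) are all assumptions, since the typedef is not shown in the snippet.

/* Hypothetical fallback, assuming memcpy_t is (void *, const void *, int) -> bytes not copied. */
static unsigned long plain_copy(void *to, const void *from, int n)
{
	memcpy(to, from, n);	/* ordinary copy for the remainder */
	return 0;		/* nothing left uncopied */
}

void *my_memcpy(void *dest, const void *src, int len)
{
	/* large remotely-cached spans go through the multicache path, the tail through plain_copy */
	fast_copy(dest, src, len, plain_copy);
	return dest;
}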
Example #10
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, int dontfrag)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (cork->length + length > mtu - headersize && dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		ipv6_local_error(sk, EMSGSIZE, fl6,
				 mtu - headersize +
				 sizeof(struct ipv6hdr));
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length < mtu - headersize &&
	    !(flags & MSG_MORE) &&
	    rt->dst.dev->features & NETIF_F_V6_CSUM)
		csummode = CHECKSUM_PARTIAL;

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #11
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    int hlimit, struct ipv6_txoptions *opt, struct flowi *fl, struct rt6_info *rt,
		    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks 
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji 
	 */

	inet->cork.length += length;

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message 
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	return func(dest, source, len);
}
Example #13
void ScrollBar::_notification(int p_what) {

	if (p_what == NOTIFICATION_DRAW) {

		RID ci = get_canvas_item();

		Ref<Texture> decr = highlight == HIGHLIGHT_DECR ? get_icon("decrement_highlight") : get_icon("decrement");
		Ref<Texture> incr = highlight == HIGHLIGHT_INCR ? get_icon("increment_highlight") : get_icon("increment");
		Ref<StyleBox> bg = has_focus() ? get_stylebox("scroll_focus") : get_stylebox("scroll");

		Ref<StyleBox> grabber;
		if (drag.active)
			grabber = get_stylebox("grabber_pressed");
		else if (highlight == HIGHLIGHT_RANGE)
			grabber = get_stylebox("grabber_highlight");
		else
			grabber = get_stylebox("grabber");

		Point2 ofs;

		VisualServer *vs = VisualServer::get_singleton();

		vs->canvas_item_add_texture_rect(ci, Rect2(Point2(), decr->get_size()), decr->get_rid());

		if (orientation == HORIZONTAL)
			ofs.x += decr->get_width();
		else
			ofs.y += decr->get_height();

		Size2 area = get_size();

		if (orientation == HORIZONTAL)
			area.width -= incr->get_width() + decr->get_width();
		else
			area.height -= incr->get_height() + decr->get_height();

		bg->draw(ci, Rect2(ofs, area));

		if (orientation == HORIZONTAL)
			ofs.width += area.width;
		else
			ofs.height += area.height;

		vs->canvas_item_add_texture_rect(ci, Rect2(ofs, decr->get_size()), incr->get_rid());
		Rect2 grabber_rect;

		if (orientation == HORIZONTAL) {

			grabber_rect.size.width = get_grabber_size();
			grabber_rect.size.height = get_size().height;
			grabber_rect.position.y = 0;
			grabber_rect.position.x = get_grabber_offset() + decr->get_width() + bg->get_margin(MARGIN_LEFT);
		} else {

			grabber_rect.size.width = get_size().width;
			grabber_rect.size.height = get_grabber_size();
			grabber_rect.position.y = get_grabber_offset() + decr->get_height() + bg->get_margin(MARGIN_TOP);
			grabber_rect.position.x = 0;
		}

		grabber->draw(ci, grabber_rect);
	}

	if (p_what == NOTIFICATION_ENTER_TREE) {

		if (has_node(drag_slave_path)) {
			Node *n = get_node(drag_slave_path);
			drag_slave = n->cast_to<Control>();
		}

		if (drag_slave) {
			drag_slave->connect("gui_input", this, "_drag_slave_input");
			drag_slave->connect("tree_exited", this, "_drag_slave_exit", varray(), CONNECT_ONESHOT);
		}
	}
	if (p_what == NOTIFICATION_EXIT_TREE) {

		if (drag_slave) {
			drag_slave->disconnect("gui_input", this, "_drag_slave_input");
			drag_slave->disconnect("tree_exited", this, "_drag_slave_exit");
		}

		drag_slave = NULL;
	}

	if (p_what == NOTIFICATION_FIXED_PROCESS) {

		if (scrolling) {
			if (get_value() != target_scroll) {
				double target = target_scroll - get_value();
				double dist = sqrt(target * target);
				double vel = ((target / dist) * 500) * get_fixed_process_delta_time();

				if (vel >= dist) {
					set_value(target_scroll);
				} else {
					set_value(get_value() + vel);
				}
			} else {
				scrolling = false;
				set_fixed_process(false);
			}
		} else if (drag_slave_touching) {

			if (drag_slave_touching_deaccel) {

				Vector2 pos = Vector2(orientation == HORIZONTAL ? get_value() : 0, orientation == VERTICAL ? get_value() : 0);
				pos += drag_slave_speed * get_fixed_process_delta_time();

				bool turnoff = false;

				if (orientation == HORIZONTAL) {

					if (pos.x < 0) {
						pos.x = 0;
						turnoff = true;
					}

					if (pos.x > (get_max() - get_page())) {
						pos.x = get_max() - get_page();
						turnoff = true;
					}

					set_value(pos.x);

					float sgn_x = drag_slave_speed.x < 0 ? -1 : 1;
					float val_x = Math::abs(drag_slave_speed.x);
					val_x -= 1000 * get_fixed_process_delta_time();

					if (val_x < 0) {
						turnoff = true;
					}

					drag_slave_speed.x = sgn_x * val_x;

				} else {

					if (pos.y < 0) {
						pos.y = 0;
						turnoff = true;
					}

					if (pos.y > (get_max() - get_page())) {
						pos.y = get_max() - get_page();
						turnoff = true;
					}

					set_value(pos.y);

					float sgn_y = drag_slave_speed.y < 0 ? -1 : 1;
					float val_y = Math::abs(drag_slave_speed.y);
					val_y -= 1000 * get_fixed_process_delta_time();

					if (val_y < 0) {
						turnoff = true;
					}
					drag_slave_speed.y = sgn_y * val_y;
				}

				if (turnoff) {
					set_fixed_process(false);
					drag_slave_touching = false;
					drag_slave_touching_deaccel = false;
				}

			} else {

				if (time_since_motion == 0 || time_since_motion > 0.1) {

					Vector2 diff = drag_slave_accum - last_drag_slave_accum;
					last_drag_slave_accum = drag_slave_accum;
					drag_slave_speed = diff / get_fixed_process_delta_time();
				}

				time_since_motion += get_fixed_process_delta_time();
			}
		}
	}

	if (p_what == NOTIFICATION_MOUSE_EXIT) {

		highlight = HIGHLIGHT_NONE;
		update();
	}
}
Example #14
}

bool ScrollBar::is_smooth_scroll_enabled() const {
	return smooth_scroll_enabled;
}

#if 0

void ScrollBar::mouse_button(const Point2& p_pos, int b->get_button_index(),bool b->is_pressed(),int p_modifier_mask) {

	// wheel!

	if (b->get_button_index()==BUTTON_WHEEL_UP && b->is_pressed()) {

		if (orientation==VERTICAL)
			set_val( get_val() - get_page() / 4.0 );
		else
			set_val( get_val() + get_page() / 4.0 );

	}
	if (b->get_button_index()==BUTTON_WHEEL_DOWN && b->is_pressed()) {

		if (orientation==HORIZONTAL)
			set_val( get_val() - get_page() / 4.0 );
		else
			set_val( get_val() + get_page() / 4.0  );
	}

	if (b->get_button_index()!=BUTTON_LEFT)
		return;
void rtl_get_page(struct page *page)
{
	get_page(page);
}
Example #16
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;
	unsigned huge_shift;
	int level;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		ret = ept_set_pfnmap_epte(vcpu, make_write, gpa, hva);
		if (ret)
			printk(KERN_ERR "ept: failed to get user page %lx\n", hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	huge_shift = compound_order(compound_head(page)) + PAGE_SHIFT;
	level = 0;
	if (huge_shift == 30)
		level = 2;
	else if (huge_shift == 21)
		level = 1;

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     level, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		put_page(page);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		if (!epte_big(*epte) && level == 2)
			ept_clear_l2_epte(epte);
		else if (!epte_big(*epte) && level == 1)
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (level) {
		struct page *tmp = page;
		page = compound_head(page);
		get_page(page);
		put_page(tmp);

		flags |= __EPTE_SZ;
	}

	*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}
Example #17
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has Large Send Offload (LSO) capability,
			 * then allow 'frame_len' to be as big as 'lso_max'
			 * if indicated transfer length is >= lport->lso_max
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Setup the frame's max payload which is used by base
			 * driver to indicate HW about max frame size, so that
			 * HW can do fragmentation appropriately based on
			 * "gso_max_size" of underline netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status ignoring the
			 * rest of the DataIN, and immediately attempt to
			 * send the response via ft_queue_status() in order
			 * to notify the initiator that it should reduce its
			 * per LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}
Example #18
void ScrollBar::_gui_input(Ref<InputEvent> p_event) {

	Ref<InputEventMouseButton> b = p_event;

	if (b.is_valid()) {
		accept_event();

		if (b->get_button_index() == 5 && b->is_pressed()) {

			/*
			if (orientation==VERTICAL)
				set_val( get_val() + get_page() / 4.0 );
			else
			*/
			set_value(get_value() + get_page() / 4.0);
			accept_event();
		}

		if (b->get_button_index() == 4 && b->is_pressed()) {

			/*
			if (orientation==HORIZONTAL)
				set_val( get_val() - get_page() / 4.0 );
			else
			*/
			set_value(get_value() - get_page() / 4.0);
			accept_event();
		}

		if (b->get_button_index() != 1)
			return;

		if (b->is_pressed()) {

			double ofs = orientation == VERTICAL ? b->get_position().y : b->get_position().x;
			Ref<Texture> decr = get_icon("decrement");
			Ref<Texture> incr = get_icon("increment");

			double decr_size = orientation == VERTICAL ? decr->get_height() : decr->get_width();
			double incr_size = orientation == VERTICAL ? incr->get_height() : incr->get_width();
			double grabber_ofs = get_grabber_offset();
			double grabber_size = get_grabber_size();
			double total = orientation == VERTICAL ? get_size().height : get_size().width;

			if (ofs < decr_size) {

				set_value(get_value() - (custom_step >= 0 ? custom_step : get_step()));
				return;
			}

			if (ofs > total - incr_size) {

				set_value(get_value() + (custom_step >= 0 ? custom_step : get_step()));
				return;
			}

			ofs -= decr_size;

			if (ofs < grabber_ofs) {

				if (scrolling) {
					target_scroll = target_scroll - get_page();
				} else {
					target_scroll = get_value() - get_page();
				}

				if (smooth_scroll_enabled) {
					scrolling = true;
					set_fixed_process(true);
				} else {
					set_value(target_scroll);
				}
				return;
			}

			ofs -= grabber_ofs;

			if (ofs < grabber_size) {

				drag.active = true;
				drag.pos_at_click = grabber_ofs + ofs;
				drag.value_at_click = get_as_ratio();
				update();
			} else {
				if (scrolling) {
					target_scroll = target_scroll + get_page();
				} else {
					target_scroll = get_value() + get_page();
				}

				if (smooth_scroll_enabled) {
					scrolling = true;
					set_fixed_process(true);
				} else {
					set_value(target_scroll);
				}
			}

		} else {

			drag.active = false;
			update();
		}
	}

	Ref<InputEventMouseMotion> m = p_event;

	if (m.is_valid()) {

		accept_event();

		if (drag.active) {

			double ofs = orientation == VERTICAL ? m->get_position().y : m->get_position().x;
			Ref<Texture> decr = get_icon("decrement");

			double decr_size = orientation == VERTICAL ? decr->get_height() : decr->get_width();
			ofs -= decr_size;

			double diff = (ofs - drag.pos_at_click) / get_area_size();

			set_as_ratio(drag.value_at_click + diff);
		} else {

			double ofs = orientation == VERTICAL ? m->get_position().y : m->get_position().x;
			Ref<Texture> decr = get_icon("decrement");
			Ref<Texture> incr = get_icon("increment");

			double decr_size = orientation == VERTICAL ? decr->get_height() : decr->get_width();
			double incr_size = orientation == VERTICAL ? incr->get_height() : incr->get_width();
			double total = orientation == VERTICAL ? get_size().height : get_size().width;

			HighlightStatus new_hilite;

			if (ofs < decr_size) {

				new_hilite = HIGHLIGHT_DECR;

			} else if (ofs > total - incr_size) {

				new_hilite = HIGHLIGHT_INCR;

			} else {

				new_hilite = HIGHLIGHT_RANGE;
			}

			if (new_hilite != highlight) {

				highlight = new_hilite;
				update();
			}
		}
	}

	Ref<InputEventKey> k = p_event;

	if (k.is_valid()) {

		if (!k->is_pressed())
			return;

		switch (k->get_scancode()) {

			case KEY_LEFT: {

				if (orientation != HORIZONTAL)
					return;
				set_value(get_value() - (custom_step >= 0 ? custom_step : get_step()));

			} break;
			case KEY_RIGHT: {

				if (orientation != HORIZONTAL)
					return;
				set_value(get_value() + (custom_step >= 0 ? custom_step : get_step()));

			} break;
			case KEY_UP: {

				if (orientation != VERTICAL)
					return;

				set_value(get_value() - (custom_step >= 0 ? custom_step : get_step()));

			} break;
			case KEY_DOWN: {

				if (orientation != VERTICAL)
					return;
				set_value(get_value() + (custom_step >= 0 ? custom_step : get_step()));

			} break;
			case KEY_HOME: {

				set_value(get_min());

			} break;
			case KEY_END: {

				set_value(get_max());

			} break;
		}
	}
}
Example #19
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;
	
	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;
		
		/* copy_pmd_range */
		
		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}
		if (pgd_none(*dst_pgd)) {
			if (!pmd_alloc(dst_pgd, 0))
				goto nomem;
		}
		
		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_offset(dst_pgd, address);

		do {
			pte_t * src_pte, * dst_pte;
		
			/* copy_pte_range */
		
			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}
			if (pmd_none(*dst_pmd)) {
				if (!pte_alloc(dst_pmd, 0))
					goto nomem;
			}
			
			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_offset(dst_pmd, address);
			
			do {
				pte_t pte = *src_pte;
				struct page *ptepage;
				
				/* copy_one_pte */

				if (pte_none(pte))
					goto cont_copy_pte_range_noset;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					goto cont_copy_pte_range;
				}
				ptepage = pte_page(pte);
				if ((!VALID_PAGE(ptepage)) || 
				    PageReserved(ptepage))
					goto cont_copy_pte_range;

				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					ptep_set_wrprotect(src_pte);
					pte = *src_pte;
				}

				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				pte = pte_mkold(pte);
				get_page(ptepage);

cont_copy_pte_range:		set_pte(dst_pte, pte);
cont_copy_pte_range_noset:	address += PAGE_SIZE;
				if (address >= end)
					goto out;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);
		
cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
	}
out:
	return 0;

nomem:
	return -ENOMEM;
}
// Password entry page
// time: wait time before the password must be re-entered (minutes)
void load_password_input( uint16_t time )
{
	int8_t cmp;
	uint16_t x,y;
	
	/* Initialization */
	set_page(password_input);	
	
	/* Check whether a password is required */
	if ( RESET == is_need_password(RESET,time) )		// no password needed
	{
		return;
	}	

	/* Draw the GUI for this page */
	gui_password_putin();

	while ( get_page() == password_input )
	{				
		/* Poll the keypad */
		pKey->key_sta = judge_key_sta(DISABLE_SHIFT);
		
		/* Handle page warnings */
		HandlerPageWarn();
		
		/* Get the current input cursor position */
		x = pPP->pos.x + 1;		
		y = pPP->pos.y + 1;					

		/* Read characters from the keypad */
		putin_status = KeyPutinChars(SET,DISABLE_SHIFT,x,y,MAX_LEN_DATA_PASSWORD,MAX_LEN_DATA_PASSWORD,&pPP->str_cnt,TYPE_INT,&buff_int,&buff_float,buff_char);			
		
		/* Handle function keys */
		if (pKey->key_sta == KEY_PRESS_OK)	
		{
			switch (pKey->key_value)
			{												
				case KEY_ENTER:					
					if (STATUS_EDIT_COMP == putin_status)
					{
						numtochar(pPP->str_cnt,buff_int,pPP->data);		// store the entered value
	
						cmp = strcmp(pSuper_Password,pPP->data);	// compare the input with the super password
	
						if (cmp != 0)	// no match
						{
							cmp = strcmp(pHmi->paseword_system,pPP->data); // compare with the user-defined system password
	
							if (cmp != 0)	// no match
							{
								warn_hint(KEEP,pWarn_Message[19]);	
								
								#ifdef ENABLE_BEEP
									BEEP_RING_WARN();
								#endif
								break;			
							}
						}
	
						return;	// password correct, proceed to the page
					 }
					 break;

				case KEY_ESC:
					return;					
			}
		}		
	}
}
Example #21
File: alias.c  Project: wfp5p/elm
static void get_aliases(int are_in_aliases)
{
/*
 *	Get all the system and user alias info
 *
 *	If we get this far, we must be needing to re-read from
 *	at least one data file.  Unfortunately that means we
 *	really need to read both since the aliases may be sorted
 *	and all mixed up...  :-(
 */

	int dups = 0;

	curr_alias = 0;
	num_duplicates = 0;
/*
 *	Read from user data file if it is open.
 */
	if (user_hash != NULL) {
	    dprint(6, (debugfile,
		      "About to read user data file = %s.\n",
	              user_hash->dbz_basefname));
	    fseek(user_hash->dbz_basef, 0L, 0);
	    while (get_one_alias(user_hash, curr_alias)) {
		dprint(8, (debugfile, "%d\t%s\t%s\n", curr_alias+1,
				       aliases[curr_alias]->alias,
				       aliases[curr_alias]->address));

		curr_alias++;
	    }
	}
	num_aliases = curr_alias;		/* Needed for find_alias() */

/*
 *	Read from system data file if it is open.
 */
	if (system_hash != NULL) {
	    dprint(6, (debugfile,
		      "About to read system data file = %s.\n",
	              system_hash->dbz_basefname));
	    fseek(system_hash->dbz_basef, 0L, 0);
	    while (get_one_alias(system_hash, curr_alias)) {
	    /*
	     *  If an identical user alias is found, we may
	     *  not want to display it, so we had better mark it.
	     */
		if (find_alias(aliases[curr_alias]->alias, USER) >= 0) {
		    setit(aliases[curr_alias]->type, DUPLICATE);
		    dups++;
		    setit(aliases[curr_alias]->status, URGENT);
				    /* Not really, I want the U for User */
		    dprint(6, (debugfile,
			       "System alias %s is same as user alias.\n",
			       aliases[curr_alias]->alias));
		}
		dprint(8, (debugfile, "%d\t%s\t%s\n", curr_alias+1,
				       aliases[curr_alias]->alias,
				       aliases[curr_alias]->address));

		curr_alias++;
	    }
	    num_duplicates = dups;
	}
	num_aliases = curr_alias - num_duplicates;

	if (OPMODE_IS_READMODE(opmode) && num_aliases > 0) {
	    curr_alias = 0;
	    sort_aliases((num_aliases+num_duplicates), FALSE, are_in_aliases);
	    curr_alias = 1;
	    if (are_in_aliases) {
	        (void) get_page(curr_alias);
	    }
	}

}
/* ---------------------------------------------------------------------------
**	Description:
**			Password settings page
** -------------------------------------------------------------------------- */
void load_password_settings(void)
{
	uint16_t x,y;
	uint8_t last_index = INVALID_INDEX_U8;
	char in_buff[10];

	/* Draw the GUI for this page */
	gui_password_setting();

	while ( get_page() == password_settings )
	{
		/* Poll the keypad */
		pKey->key_sta = judge_key_sta(DISABLE_SHIFT);
		
		/* Handle page warnings */
		HandlerPageWarn();
		
		x = pPassword->pos[pPassword->index].x + 1;
		y = pPassword->pos[pPassword->index].y + 1;

		/* Read the password input */
		putin_status = KeyPutinChars(SET,DISABLE_SHIFT,x,y,MAX_LEN_DATA_PASSWORD,MAX_LEN_DATA_PASSWORD,&pPassword->str_cnt,TYPE_INT,&buff_int,&buff_float,buff_char);
		
		/* Direction keys change the selected index */
		dir_key_index_change(SET,3,1,3,&pPassword->index);

		/* Update the edit state */
		password_judge_sta();

		/* Move the cursor */
		password_change_cursor(&last_index,&pPassword->index);
		
		/* Toggle the shortcut menu */
		HideShortcutMenu(PAGE_SYSTEM_PASSWORD_SET);

		/* Handle function keys */
		if (pKey->key_sta == KEY_PRESS_OK)	
		{
			switch (pKey->key_value)
			{
				case KEY_F3:		// save
					/* Validate the entered passwords */
					if (putin_password_check() != 0)	
					{
						break;
					}

					strcpy(pHmi->paseword_system,pPassword->data[1].buff);	// store the new password to flash
					pcm_save();

					warn_hint(KEEP,pWarn_Message[22]);
					break;

				case KEY_F4:		// cancel
					set_page(system_set);
					break;

				case KEY_ENTER:				
					if (STATUS_EDIT_COMP == putin_status)
					{
						numtochar(pPassword->str_cnt,buff_int,in_buff);	  // convert the numeric input to characters
					
						/* Check for illegal characters */
						if ( ERROR == data_check_point(in_buff) )
						{
							#ifdef ENABLE_BEEP
								BEEP_RING_WARN();
							#endif
							warn_hint(KEEP,pWarn_Message[1]);
							break;
						}

						/* Check the password length */
						if(pPassword->str_cnt < 1)
						{
							#ifdef ENABLE_BEEP
								BEEP_RING_WARN();
							#endif
							warn_hint(KEEP,pWarn_Message[7]);
							break;
						}

						numtochar(pPassword->str_cnt,buff_int,pPassword->data[pPassword->index].buff);
						pPassword->index++;
					}		
					break;

				case KEY_ESC:
					if (STATUS_CANCEL_PUTIN == putin_status)	
					{
						pPassword->index++;
					}
					else
					{
						set_page(system_set);
					}		
					break;
			}
		}
	}
}
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			      unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
			task, task->sc, offset, count, skb);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
					sdb->table.nents, tdata->offset,
					sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
					sdb->table.nents, tdata->offset,
					tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		get_page(pad_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
				 padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
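/*
 * Sketch only: the function above takes a reference on a global, zero-filled
 * pad_page to pad PDUs out to the iSCSI 4-byte boundary.  A page like that is
 * typically prepared once at module init, roughly as below; the declaration
 * and the init function name are illustrative assumptions, not the driver's
 * actual symbols.
 */
static struct page *pad_page;	/* mirrors the global used above */

static int __init pdu_pad_page_init(void)
{
	pad_page = alloc_page(GFP_KERNEL);
	if (!pad_page)
		return -ENOMEM;
	/* Padding bytes must read as zero on the wire. */
	memset(page_address(pad_page), 0, PAGE_SIZE);
	return 0;
}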
Beispiel #24
0
static int yaffs_writepage(struct page *page)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
	struct inode *inode;
	unsigned long end_index;
	char *buffer;
	yaffs_Object *obj;
	int nWritten = 0;
	unsigned nBytes;

	if (!mapping)
		BUG();
	inode = mapping->host;
	if (!inode)
		BUG();

	if (offset > inode->i_size) {
		T(YAFFS_TRACE_OS,
		  (KERN_DEBUG
		   "yaffs_writepage at %08x, inode size = %08x!!!\n",
		   (unsigned)(page->index << PAGE_CACHE_SHIFT),
		   (unsigned)inode->i_size));
		T(YAFFS_TRACE_OS,
		  (KERN_DEBUG "                -> don't care!!\n"));
		unlock_page(page);
		return 0;
	}

	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	
	if (page->index < end_index) {
		nBytes = PAGE_CACHE_SIZE;
	} else {
		nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
	}

	get_page(page);

	buffer = kmap(page);

	obj = yaffs_InodeToObject(inode);
	yaffs_GrossLock(obj->myDev);

	T(YAFFS_TRACE_OS,
	  (KERN_DEBUG "yaffs_writepage at %08x, size %08x\n",
	   (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
	T(YAFFS_TRACE_OS,
	  (KERN_DEBUG "writepag0: obj = %05x, ino = %05x\n",
	   (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));

	nWritten =
	    yaffs_WriteDataToFile(obj, buffer, page->index << PAGE_CACHE_SHIFT,
				  nBytes, 0);

	T(YAFFS_TRACE_OS,
	  (KERN_DEBUG "writepag1: obj = %05x, ino = %05x\n",
	   (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));

	yaffs_GrossUnlock(obj->myDev);

	kunmap(page);
	SetPageUptodate(page);
	UnlockPage(page);
	put_page(page);

	return (nWritten == nBytes) ? 0 : -ENOSPC;
}
/*
 * Hacked from kernel function __get_user_pages in mm/memory.c
 *
 * Handle buffers allocated by other kernel space driver and mmaped into user
 * space, function Ignore the VM_PFNMAP and VM_IO flag in VMA structure
 *
 * Get physical pages from user space virtual address and update into page list
 */
static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, int nr_pages,
			      unsigned int gup_flags, struct page **pages,
			      struct vm_area_struct **vmas)
{
	int i, ret;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_vma(mm, start);
		if (!vma) {
			dev_err(atomisp_dev, "find_vma failed\n");
			return i ? : -EFAULT;
		}

		if (is_vm_hugetlb_page(vma)) {
			/*
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			*/
			continue;
		}

		do {
			struct page *page;
			unsigned long pfn;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current))) {
				dev_err(atomisp_dev,
					"fatal_signal_pending in %s\n",
					__func__);
				return i ? i : -ERESTARTSYS;
			}

			ret = follow_pfn(vma, start, &pfn);
			if (ret) {
				dev_err(atomisp_dev, "follow_pfn() failed\n");
				return i ? : -EFAULT;
			}

			page = pfn_to_page(pfn);
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;
				get_page(page);
				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}
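/*
 * A minimal usage sketch (not part of the driver above): how a caller might
 * pin a user buffer with __get_pfnmap_pages() and release the pages again.
 * The helper name and the immediate put_page() loop are assumptions made for
 * illustration; only the mmap_sem locking and the get_page()/put_page()
 * pairing follow the usual get_user_pages rules.
 */
static int pin_user_buffer_example(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int i, pinned;

	/* The mmap semaphore must be held while the VMAs are walked. */
	down_read(&current->mm->mmap_sem);
	pinned = __get_pfnmap_pages(current, current->mm, uaddr, nr_pages,
				    FOLL_GET | FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/* ... use the pinned pages for DMA or copying here ... */

	/* Drop the reference taken by get_page() on every pinned page. */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return 0;
}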
Beispiel #26
0
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
Beispiel #27
0
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned int i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER :
                            max_order(current->domain)) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( i != a->nr_done && hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            if ( is_domain_direct_mapped(d) )
            {
                mfn = gpfn;

                for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
                {
                    if ( !mfn_valid(mfn) )
                    {
                        gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
                                 mfn);
                        goto out;
                    }

                    page = mfn_to_page(mfn);
                    if ( !get_page(page, d) )
                    {
                        gdprintk(XENLOG_INFO,
                                 "mfn %#"PRI_xen_pfn" doesn't belong to d%d\n",
                                  mfn, d->domain_id);
                        goto out;
                    }
                    put_page(page);
                }

                mfn = gpfn;
                page = mfn_to_page(mfn);
            }
            else
            {
                page = alloc_domheap_pages(d, a->extent_order, a->memflags);

                if ( unlikely(!page) )
                {
                    if ( !opt_tmem || a->extent_order )
                        gdprintk(XENLOG_INFO,
                                 "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n",
                                 a->extent_order, d->domain_id, a->memflags,
                                 i, a->nr_extents);
                    goto out;
                }

                mfn = page_to_mfn(page);
            }

            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1U << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */ 
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                    goto out;
            }
        }
    }

out:
    a->nr_done = i;
}
Beispiel #28
0
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page_endio:
	 *
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
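	/*
	 * Worked example (illustrative numbers): with PAGE_CACHE_SIZE of 4096
	 * and i_size = 10000, the last page holds len = 10000 - 2*4096 = 1808
	 * valid bytes, so zero_user_segment() clears bytes 1808..4095 before
	 * the buffer heads are submitted.
	 */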

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
Beispiel #29
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
			    rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
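	/*
	 * Illustrative numbers (assuming no extension headers, so
	 * fragheaderlen = sizeof(struct ipv6hdr) = 40): with an MTU of 1500,
	 * maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1456 + 32 = 1488, i.e.
	 * each fragment's payload is trimmed to an 8-byte boundary with room
	 * reserved for the fragment header.
	 */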

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octects, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (length > mtu) {
		int proto = sk->sk_protocol;
		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}

		if (proto == IPPROTO_UDP &&
		    (rt->dst.dev->features & NETIF_F_UFO)) {

			err = ip6_ufo_append_data(sk, getfrag, from, length,
						  hh_len, fragheaderlen,
						  transhdrlen, mtu, flags, rt);
			if (err)
				goto error;
			return 0;
		}
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Beispiel #30
0
/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
				   struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	int bytes, space, offset;

	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

	/* If there is space left in the previously allocated page,
	 * then use it. Otherwise allocate a new one */
	rx_buf->page = rx_queue->buf_page;
	if (rx_buf->page == NULL) {
		dma_addr_t dma_addr;

		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
		if (unlikely(rx_buf->page == NULL))
			return -ENOMEM;

		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
					0, efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(rx_buf->page, efx->rx_buffer_order);
			rx_buf->page = NULL;
			return -EIO;
		}

		rx_queue->buf_page = rx_buf->page;
		rx_queue->buf_dma_addr = dma_addr;
		rx_queue->buf_data = (page_address(rx_buf->page) +
				      EFX_PAGE_IP_ALIGN);
	}

	rx_buf->len = bytes;
	rx_buf->data = rx_queue->buf_data;
	offset = efx_rx_buf_offset(rx_buf);
	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;

	/* Try to pack multiple buffers per page */
	if (efx->rx_buffer_order == 0) {
		/* The next buffer starts on the next 512 byte boundary */
		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
		offset += ((bytes + 0x1ff) & ~0x1ff);

		space = efx_rx_buf_size(efx) - offset;
		if (space >= bytes) {
			/* Refs dropped on kernel releasing each skb */
			get_page(rx_queue->buf_page);
			goto out;
		}
	}

	/* This is the final RX buffer for this page, so mark it for
	 * unmapping */
	rx_queue->buf_page = NULL;
	rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
	return 0;
}
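/*
 * A rough sketch (not taken from the sfc driver) of the matching release path
 * for a buffer created by efx_init_rx_buffer_page() above: the final buffer
 * on a page carries unmap_addr, so the DMA mapping is torn down there, and
 * every buffer drops the page reference taken with get_page().  The function
 * name is an assumption; efx_rx_buf_size() is reused from the code above.
 */
static void efx_fini_rx_buffer_page_sketch(struct efx_nic *efx,
					   struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->unmap_addr) {
		pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
			       efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE);
		rx_buf->unmap_addr = 0;
	}
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}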