/*
 * Copy existing mapping into a newly created one
 */
void clone_map(struct mapping *dst, struct mapping *map)
{
    u32 *src;
    u32 *pgtable;
    int d, t;

    dst->m_nmma = 0;
    dst->m_pgdir = get_pages(0, 0);
    dst->m_pgtable = pgtable =
        get_pages(0, log2((phys_pgs + PGT_ENTRIES - 1) / PGT_ENTRIES));

    for (d = 0; d < PGD_ENTRIES; d++) {
        /* copy flags as-is */
        dst->m_pgdir[d] = map->m_pgdir[d] & PAGE_MASK;
        dst->m_pgdir[d] |= (u32)pgtable;

        src = (u32 *)(map->m_pgdir[d] & NOPAGE_MASK);
        if (dst->m_pgdir[d] & PTF_PRESENT) {
            /* copy all ptes */
            for (t = 0; t < PGT_ENTRIES; t++)
                pgtable[t] = src[t];
        }

        /* leave holes in page table for quicker access */
        pgtable += PAGE_SIZE / PTE_BYTES;
    }
}
static void f2fs_write_end_io(struct bio *bio)
{
    struct f2fs_sb_info *sbi = bio->bi_private;
    struct bio_vec *bvec;
    int i;

    bio_for_each_segment_all(bvec, bio, i) {
        struct page *page = bvec->bv_page;

        f2fs_restore_and_release_control_page(&page);

        if (unlikely(bio->bi_error)) {
            set_page_dirty(page);
            set_bit(AS_EIO, &page->mapping->flags);
            f2fs_stop_checkpoint(sbi);
        }
        end_page_writeback(page);
        dec_page_count(sbi, F2FS_WRITEBACK);
    }

    if (!get_pages(sbi, F2FS_WRITEBACK) &&
            !list_empty(&sbi->cp_wait.task_list))
        wake_up(&sbi->cp_wait);

    bio_put(bio);
}
/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
        uint32_t *iova)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    int ret = 0;

    if (!msm_obj->domain[id].iova) {
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct page **pages = get_pages(obj);

        if (IS_ERR(pages))
            return PTR_ERR(pages);

        if (iommu_present(&platform_bus_type)) {
            struct msm_mmu *mmu = priv->mmus[id];
            uint32_t offset;

            if (WARN_ON(!mmu))
                return -EINVAL;

            offset = (uint32_t)mmap_offset(obj);
            ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
                    obj->size, IOMMU_READ | IOMMU_WRITE);
            msm_obj->domain[id].iova = offset;
        } else {
            msm_obj->domain[id].iova = physaddr(obj);
        }
    }

    if (!ret)
        *iova = msm_obj->domain[id].iova;

    return ret;
}
static void f2fs_put_super(struct super_block *sb)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);

    if (sbi->s_proc) {
        remove_proc_entry("segment_info", sbi->s_proc);
        remove_proc_entry(sb->s_id, f2fs_proc_root);
    }
    kobject_del(&sbi->s_kobj);

    f2fs_destroy_stats(sbi);
    stop_gc_thread(sbi);

    /* We don't need to do checkpoint when it's clean */
    if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
        write_checkpoint(sbi, true);

    iput(sbi->node_inode);
    iput(sbi->meta_inode);

    /* destroy f2fs internal modules */
    destroy_node_manager(sbi);
    destroy_segment_manager(sbi);

    kfree(sbi->ckpt);
    kobject_put(&sbi->s_kobj);
    wait_for_completion(&sbi->s_kobj_unregister);

    sb->s_fs_info = NULL;
    brelse(sbi->raw_super_buf);
    kfree(sbi);
}
/*! \brief find instances of a given string
 *
 *  Finds instances of a given string and displays the result inside this
 *  widget.
 *
 *  \param [in] state   the GschemFindTextState instance
 *  \param [in] pages   a list of pages to search
 *  \param [in] type    the type of find to perform
 *  \param [in] text    the text to find
 *  \param [in] descend descend the page hierarchy
 *  \return the number of objects found
 */
int gschem_find_text_state_find (GschemFindTextState *state, GList *pages, int type, const char *text, gboolean descend)
{
  int count;
  GSList *objects = NULL;
  GSList *all_pages;

  all_pages = get_pages (pages, descend);

  switch (type) {
    case FIND_TYPE_SUBSTRING:
      objects = find_objects_using_substring (all_pages, text);
      break;

    case FIND_TYPE_PATTERN:
      objects = find_objects_using_pattern (all_pages, text);
      break;

    case FIND_TYPE_REGEX:
      objects = find_objects_using_regex (all_pages, text, NULL);
      break;

    default:
      break;
  }

  g_slist_free (all_pages);

  assign_store (state, objects);
  count = g_slist_length (objects);
  g_slist_free (objects);

  return count;
}
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_gem_object *obj = vma->vm_private_data;
    struct drm_device *dev = obj->dev;
    struct page **pages;
    unsigned long pfn;
    pgoff_t pgoff;
    int ret;

    /* Make sure we don't parallel update on a fault, nor move or remove
     * something from beneath our feet
     */
    ret = mutex_lock_interruptible(&dev->struct_mutex);
    if (ret)
        goto out;

    /* make sure we have pages attached now */
    pages = get_pages(obj);
    if (IS_ERR(pages)) {
        ret = PTR_ERR(pages);
        goto out_unlock;
    }

    /* We don't use vmf->pgoff since that has the fake offset: */
    pgoff = ((unsigned long)vmf->virtual_address -
            vma->vm_start) >> PAGE_SHIFT;

    pfn = page_to_pfn(pages[pgoff]);

    VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
            pfn, pfn << PAGE_SHIFT);

    ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
            __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
    mutex_unlock(&dev->struct_mutex);
out:
    switch (ret) {
    case -EAGAIN:
    case 0:
    case -ERESTARTSYS:
    case -EINTR:
    case -EBUSY:
        /*
         * EBUSY is ok: this just means that another thread
         * already did the job.
         */
        return VM_FAULT_NOPAGE;
    case -ENOMEM:
        return VM_FAULT_OOM;
    default:
        return VM_FAULT_SIGBUS;
    }
}
/*
 * Each device descriptor is followed by the description of its virtqueues. We
 * specify how many descriptors the virtqueue is to have.
 */
static void add_virtqueue(struct device *dev, unsigned int num_descs,
                          void (*service)(struct virtqueue *))
{
    unsigned int pages;
    struct virtqueue **i, *vq = malloc(sizeof(*vq));
    void *p;

    /* First we need some memory for this virtqueue. */
    pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
            / getpagesize();
    p = get_pages(pages);

    /* Initialize the virtqueue */
    vq->next = NULL;
    vq->last_avail_idx = 0;
    vq->dev = dev;

    /*
     * This is the routine the service thread will run, and its Process ID
     * once it's running.
     */
    vq->service = service;
    vq->thread = (pid_t)-1;

    /* Initialize the configuration. */
    vq->config.num = num_descs;
    vq->config.irq = devices.next_irq++;
    vq->config.pfn = to_guest_phys(p) / getpagesize();

    /* Initialize the vring. */
    vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);

    /*
     * Append virtqueue to this device's descriptor. We use
     * device_config() to get the end of the device's current virtqueues;
     * we check that we haven't added any config or feature information
     * yet, otherwise we'd be overwriting them.
     */
    assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
    memcpy(device_config(dev), &vq->config, sizeof(vq->config));
    dev->num_vq++;
    dev->desc->num_vq++;

    verbose("Virtqueue page %#lx\n", to_guest_phys(p));

    /*
     * Add to tail of list, so dev->vq is first vq, dev->vq->next is
     * second.
     */
    for (i = &dev->vq; *i; i = &(*i)->next);
    *i = vq;
}
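The page-count computation in add_virtqueue is a plain ceiling division of the vring size by the host page size. Below is a minimal standalone sketch of that rounding; the 4096-byte page size, the example byte count, and the helper name are assumptions for illustration, not values or names from the lguest source.

#include <stdio.h>
#include <stddef.h>

/* Illustrative only: round a byte count up to whole pages, the same
 * arithmetic add_virtqueue applies to the vring size. */
static unsigned int bytes_to_pages(size_t bytes, size_t page_size)
{
    return (unsigned int)((bytes + page_size - 1) / page_size);
}

int main(void)
{
    /* a 6000-byte ring on 4096-byte pages needs 2 pages */
    printf("%u\n", bytes_to_pages(6000, 4096));
    return 0;
}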
/*
 * Initialize a flat 1:1 mapping
 * which is stored in global root_map
 */
static void init_rootmap()
{
    u32 pgaddr = 0;
    int pg_idx = 0;
    int pgt_idx = 0;
    u32 tablesz;
    u32 *pgtable;
    u32 *pgdir;

    tablesz = phys_pgs / PGT_ENTRIES;
    if (phys_pgs % PGT_ENTRIES)
        tablesz++;

    root_map.m_pgdir = pgdir = get_pages(0, 0); /* just one page for pgdir */
    root_map.m_pgtable = pgtable = get_pages(0, log2(tablesz));

    /* Fill the page tables */
    while (pg_idx < phys_pgs)
        pgtable[pg_idx++] = PTE_DESC(pgaddr++, PTF_PRESENT | PTF_RW);

    /* Clear the remainder of the last page table */
    while (pg_idx % PGT_ENTRIES)
        pgtable[pg_idx++] = 0 | PTF_USER | PTF_RW;

    /* Add the filled page tables to the page directory */
    for (; pgt_idx < tablesz; pgt_idx++) {
        pgdir[pgt_idx] = (u32)(pgtable + pgt_idx * PGT_ENTRIES) |
                PTF_PRESENT | PTF_RW | PTF_GLOBAL;
    }

    while (pgt_idx < PGT_ENTRIES)
        pgdir[pgt_idx++] = 0 | PTF_USER | PTF_RW;

#ifdef DEBUG
    display_map(&root_map);
#endif
}
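For reference, a two-level table like root_map is indexed by splitting a 32-bit virtual address into a directory index, a table index, and a page offset. Here is a minimal sketch of that split under the usual x86 4-KiB layout (10 + 10 + 12 bits); the function and variable names are illustrative and not taken from the source above.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: decompose a 32-bit virtual address the way a
 * two-level page table such as root_map is walked (assumed 4-KiB pages). */
static void split_vaddr(uint32_t vaddr)
{
    uint32_t dir_idx = vaddr >> 22;            /* top 10 bits -> pgdir entry */
    uint32_t tab_idx = (vaddr >> 12) & 0x3ff;  /* next 10 bits -> pgtable entry */
    uint32_t offset  = vaddr & 0xfff;          /* low 12 bits -> byte in page */

    printf("%#010x -> dir %u, table %u, offset %#x\n",
           (unsigned)vaddr, (unsigned)dir_idx, (unsigned)tab_idx,
           (unsigned)offset);
}

int main(void)
{
    split_vaddr(0x00403123); /* in a 1:1 map this resolves to itself */
    return 0;
}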
int f2fs_sync_fs(struct super_block *sb, int sync)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    int ret = 0;

    if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
        return 0;

    if (sync)
        write_checkpoint(sbi, false, false);

    return ret;
}
static void *bfin_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
    void *ret;

    ret = (void *)__alloc_dma_pages(get_pages(size));

    if (ret) {
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);
    }

    return ret;
}
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
    void *ret;

    ret = (void *)__alloc_dma_pages(get_pages(size));

    if (ret) {
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);
    }

    return ret;
}
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);

    WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

    if (!msm_obj->vaddr) {
        struct page **pages = get_pages(obj);

        if (IS_ERR(pages))
            return ERR_CAST(pages);

        msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (msm_obj->vaddr == NULL)
            return ERR_PTR(-ENOMEM);
    }

    return msm_obj->vaddr;
}
/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
        uint64_t *iova)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
    int ret = 0;

    if (!msm_obj->domain[id].iova) {
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct page **pages = get_pages(obj);

        if (IS_ERR(pages))
            return PTR_ERR(pages);

        if (iommu_present(&platform_bus_type)) {
            ret = msm_gem_map_vma(priv->aspace[id],
                    &msm_obj->domain[id], msm_obj->sgt,
                    obj->size >> PAGE_SHIFT);
        } else {
int f2fs_sync_fs(struct super_block *sb, int sync)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);

    trace_f2fs_sync_fs(sb, sync);

    if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
        return 0;

    if (sync) {
        mutex_lock(&sbi->gc_mutex);
        write_checkpoint(sbi, false);
        mutex_unlock(&sbi->gc_mutex);
    } else {
        f2fs_balance_fs(sbi);
    }

    return 0;
}
static void add_virtqueue(struct device *dev, unsigned int num_descs,
                          void (*service)(struct virtqueue *))
{
    unsigned int pages;
    struct virtqueue **i, *vq = malloc(sizeof(*vq));
    void *p;

    pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
            / getpagesize();
    p = get_pages(pages);

    vq->next = NULL;
    vq->last_avail_idx = 0;
    vq->dev = dev;

    vq->service = service;
    vq->thread = (pid_t)-1;

    vq->config.num = num_descs;
    vq->config.irq = devices.next_irq++;
    vq->config.pfn = to_guest_phys(p) / getpagesize();

    vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);

    assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
    memcpy(device_config(dev), &vq->config, sizeof(vq->config));
    dev->num_vq++;
    dev->desc->num_vq++;

    verbose("Virtqueue page %#lx\n", to_guest_phys(p));

    for (i = &dev->vq; *i; i = &(*i)->next);
    *i = vq;
}
static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, unsigned long attrs)
{
    __free_dma_pages((unsigned long)vaddr, get_pages(size));
}
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
    int ret = -ENOMEM;
    uint32_t i;

    if (BUILTIN_EXPECT(!ep, 0))
        return -EINVAL;
    if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
        return -EINVAL;
    if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
        return -EINVAL;

    spinlock_irqsave_lock(&table_lock);

    for(i=0; i<MAX_TASKS; i++) {
        if (task_table[i].status == TASK_INVALID) {
            task_table[i].id = i;
            task_table[i].status = TASK_READY;
            task_table[i].last_stack_pointer = NULL;
            task_table[i].stack = create_stack(i);
            task_table[i].prio = prio;
            spinlock_init(&task_table[i].vma_lock);
            task_table[i].vma_list = NULL;
            task_table[i].heap = NULL;
            task_table[i].wait_id = -1;
            spinlock_irqsave_init(&task_table[i].page_lock);
            atomic_int32_set(&task_table[i].user_usage, 0);

            /* Allocate a new PGD or PML4 and copy the page table */
            task_table[i].page_map = get_pages(1);
            if (BUILTIN_EXPECT(!task_table[i].page_map, 0))
                goto out;

            /* Copy page tables & user frames of current task to new one */
            page_map_copy(&task_table[i]);

            if (id)
                *id = i;

            ret = create_default_frame(task_table+i, ep, arg);

            // add task to the readyqueues
            spinlock_irqsave_lock(&readyqueues.lock);
            readyqueues.prio_bitmap |= (1 << prio);
            readyqueues.nr_tasks++;
            if (!readyqueues.queue[prio-1].first) {
                task_table[i].next = task_table[i].prev = NULL;
                readyqueues.queue[prio-1].first = task_table+i;
                readyqueues.queue[prio-1].last = task_table+i;
            } else {
                task_table[i].prev = readyqueues.queue[prio-1].last;
                task_table[i].next = NULL;
                readyqueues.queue[prio-1].last->next = task_table+i;
                readyqueues.queue[prio-1].last = task_table+i;
            }
            spinlock_irqsave_unlock(&readyqueues.lock);
            break;
        }
    }

out:
    spinlock_irqsave_unlock(&table_lock);

    return ret;
}
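The ready-queue append at the end of create_task is ordinary tail insertion into a doubly linked list tracked by first/last pointers. The sketch below shows that pattern on its own; the node and queue types are assumptions for illustration, not the task structures used above.

#include <stdio.h>
#include <stddef.h>

struct node {
    int id;
    struct node *prev, *next;
};

struct queue {
    struct node *first, *last;
};

/* Illustrative only: append a node at the tail, mirroring the
 * readyqueues insertion in create_task. */
static void enqueue_tail(struct queue *q, struct node *n)
{
    n->next = NULL;
    if (!q->first) {
        /* empty queue: the node becomes both head and tail */
        n->prev = NULL;
        q->first = q->last = n;
    } else {
        /* link behind the current tail and advance the tail pointer */
        n->prev = q->last;
        q->last->next = n;
        q->last = n;
    }
}

int main(void)
{
    struct queue q = { NULL, NULL };
    struct node a = { .id = 1 }, b = { .id = 2 };

    enqueue_tail(&q, &a);
    enqueue_tail(&q, &b);
    printf("first=%d last=%d\n", q.first->id, q.last->id);
    return 0;
}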
static void f2fs_put_super(struct super_block *sb)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);

    if (sbi->s_proc) {
        remove_proc_entry("segment_info", sbi->s_proc);
        remove_proc_entry(sb->s_id, f2fs_proc_root);
    }
    kobject_del(&sbi->s_kobj);

    stop_gc_thread(sbi);

    /* prevent remaining shrinker jobs */
    mutex_lock(&sbi->umount_mutex);

    /*
     * We don't need to do checkpoint when superblock is clean.
     * But if the previous checkpoint was not done by umount, we need to
     * do a clean checkpoint again.
     */
    if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
            !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
        struct cp_control cpc = {
            .reason = CP_UMOUNT,
        };
        write_checkpoint(sbi, &cpc);
    }

    /* write_checkpoint can update stat information */
    f2fs_destroy_stats(sbi);

    /*
     * Normally the superblock is clean, so we need to release this.
     * In addition, EIO will skip the checkpoint, so we need this as well.
     */
    release_ino_entry(sbi);
    release_discard_addrs(sbi);

    f2fs_leave_shrinker(sbi);
    mutex_unlock(&sbi->umount_mutex);

    /* our cp_error case, we can wait for any writeback page */
    if (get_pages(sbi, F2FS_WRITEBACK))
        f2fs_flush_merged_bios(sbi);

    iput(sbi->node_inode);
    iput(sbi->meta_inode);

    /* destroy f2fs internal modules */
    destroy_node_manager(sbi);
    destroy_segment_manager(sbi);

    kfree(sbi->ckpt);

    kobject_put(&sbi->s_kobj);
    wait_for_completion(&sbi->s_kobj_unregister);

    sb->s_fs_info = NULL;
    if (sbi->s_chksum_driver)
        crypto_free_shash(sbi->s_chksum_driver);
    kfree(sbi->raw_super);
    kfree(sbi);
}
void getimage(IMAGE* pDstImg, int srcX, int srcY, int srcWidth, int srcHeight)
{
    Deref(pDstImg).getimage(&get_pages().get_target_ref(),
                            srcX, srcY, srcWidth, srcHeight);
}
extern void generate_index(void)
{
    sqlite3_stmt *stmt = (sqlite3_stmt *)NULL;
    char *ztail = (char *)NULL;
    char initial = '\0';
    char prevword[MAX_WORD_LEN];
    char *w;
    char *d;        // deck
    char *s;        // slide list
    char *p;
    int slast;      // Slide or page number
    int sn = 0;     // Slide or page number
    short range;
    char last_out;
    int rc;
    short kw;
    char keyword;
    char rtf = get_mode() & OPT_RTF;
    unsigned int codepoint;
    int len;

    if ((sqlite3_prepare(G_db,
                         "select case ? when 0 then word"
                         " else replace(word,'\\','\\\\') end,"
                         " shortname,slidenum,kw"
                         " from(select x.word,x.shortname,"
                         " group_concat(x.slidenum) as slidenum,"
                         " case x.kw when 'Y' then 1 else 0 end as kw"
                         " from (select distinct coalesce(w.stem, w.word)"
                         " as word, d.shortname,"
                         " case ?"
                         " when 0 then s.slidenum"
                         " when 1 then s.slidenum"
                         " else cast(round((s.slidenum"
                         " - 1)/?)+1 as int)"
                         " end as slidenum,"
                         " w.kw"
                         " from words w"
                         " join slides s"
                         " on s.slideid = w.slideid"
                         " join decks d"
                         " on d.deckid = s.deckid"
                         " where length(trim(word))>0"
                         " order by 1, 2, 3) x"
                         " group by x.word,x.shortname,x.kw) y"
                         " order by case when substr(upper(word),1,1)"
                         " between 'A' and 'Z' then 1 else 0 end,"
                         " upper(word),shortname",
                         -1, &stmt, (const char **)&ztail) != SQLITE_OK)
        || (sqlite3_bind_int(stmt, 1, (int)rtf) != SQLITE_OK)
        || (sqlite3_bind_int(stmt, 2, (int)get_pages()) != SQLITE_OK)
        || (sqlite3_bind_int(stmt, 3, (int)get_pages()) != SQLITE_OK)) {
        fprintf(stderr, "generate_index 0: %s\n",
                (char *)sqlite3_errmsg(G_db));
        (void)sqlite3_close(G_db);
        exit(1);
    }
    if (rtf) {
        printf("{\\rtf1\\ansi\\ansicpg1252\\cocoartf1404\\cocoasubrtf340\n");
        printf("{\\fonttbl\\f0\\fswiss\\fcharset0 Helvetica;\\f1\\fnil\\fcharset0 Consolas-Bold;}\n");
        printf("{\\colortbl;\\red255\\green255\\blue255;\\red59\\green0\\blue164;}\n");
        printf("\\margl1440\\margr1440\\vieww18540\\viewh14540\\viewkind0\n");
        printf("\\pard\\tx566\\tx1133\\tx1700\\tx2267\\tx2834\\tx3401\\tx3968\\tx4535\\tx5102\\tx5669\\tx6236\\tx6803\\pardirnatural\\partightenfactor0\n");
        printf("\n\\f0\\fs24 \\cf0 \\\n");
    }
    prevword[0] = '\0';
    while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
        w = (char *)sqlite3_column_text(stmt, 0);
        kw = (char)sqlite3_column_int(stmt, 3);
        if ((toupper(*w) != initial) && isalpha(*w)) {
            if (rtf) {
                printf("\\\n\\\n\\b\\fs44 \\cf2 %c\n\\b0\\fs24 \\cf0 \\\n",
                       toupper(*w));
            } else {
                printf("\n\n--- %c ---", toupper(*w));
            }
            initial = toupper(*w);
        }
        w = index_entry(w, &kw);
        keyword = (kw == 1);
        if (strncmp(w, prevword, MAX_WORD_LEN)) {
            if (rtf && autokw() && !keyword) {
                keyword = lowercase_word(w);
            }
            if (rtf) {
                if (keyword) {
                    printf("\\\n\\f1\\b %s\n\\f0\\b0 \\\n", w);
                } else {
                    printf("\\\n");
                    p = w;
                    while (*p) {
                        codepoint = utf8_to_codepoint((const unsigned char *)p,
                                                      &len);
                        if (len == 1) {
                            putchar(*p);
                        } else {
                            printf("\\u%ld?", (long)codepoint);
                        }
                        p += len;
                    }
                    printf("\\\n");
                }
            } else {
                printf("\n%s\n", w);
            }
            strncpy(prevword, w, MAX_WORD_LEN);
        } else {
            if (rtf) {
                putchar('\\');
            }
            putchar('\n');
        }
        d = (char *)sqlite3_column_text(stmt, 1);
        printf(" %s%-30.30s%s %s ",
               (rtf ? "\\i " : ""),
               d,
               (rtf ? "\n\\i0" : ""),
               (get_pages() ? "p." : ""));
        s = strdup((char *)sqlite3_column_text(stmt, 2));
        // Try to condense by replacing three or more consecutive
        // page or slide values with a n-m range
        range = 0;
        slast = -1;
        p = strtok(s, ",");
        last_out = 0;
        while (p) {
            if (sscanf(p, "%d", &sn)) {
                if (sn == 1 + slast) {
                    range++;
                    last_out = 0;
                } else {
                    // Not in the same range of values
                    if (slast > 0) {
                        if (range) {
                            if (range > 1) {
                                printf("-%d,%d", slast, sn);
                                last_out = 1;
                            } else {
                                if (slast > 0) {
                                    if (!last_out) {
                                        printf(",%d", slast);
                                    }
                                    printf(",%d", sn);
                                } else {
                                    printf("%d", sn);
                                }
                                last_out = 1;
                            }
                            range = 0;
                        } else {
                            printf(",%d", sn);
                            last_out = 1;
                        }
                    } else {
                        printf("%d", sn);
                        last_out = 1;
                    }
                }
                slast = sn;
            }
            p = strtok(NULL, ",");
        }
        if (!last_out) {
            if (range > 1) {
                printf("-%d", sn);
            } else {
                printf(",%d", sn);
            }
        }
        if (s) {
            free(s);
        }
    }
    if (rtf) {
        putchar('\\');
    }
    putchar('\n');
    fflush(stdout);
    if (rc != SQLITE_DONE) {
        fprintf(stderr, "generate_index 1: %s\n",
                (char *)sqlite3_errmsg(G_db));
        (void)sqlite3_close(G_db);
        exit(1);
    } else {
        (void)sqlite3_finalize(stmt);
    }
    if (rtf) {
        printf("}\n");
    }
}
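The slide-number condensing inside generate_index is easier to follow in isolation: walk a sorted list and emit "n-m" whenever three or more consecutive values occur. Below is a minimal sketch of that idea over a fixed array, without the RTF handling or the streaming output-state tracking of the real function; the helper name is illustrative.

#include <stdio.h>

/* Illustrative only: print sorted page numbers, collapsing runs of
 * three or more consecutive values into "first-last". */
static void print_condensed(const int *v, int n)
{
    int i = 0;

    while (i < n) {
        int j = i;

        /* extend the run while values stay consecutive */
        while (j + 1 < n && v[j + 1] == v[j] + 1)
            j++;

        if (i)
            putchar(',');
        if (j - i >= 2)              /* run of three or more */
            printf("%d-%d", v[i], v[j]);
        else if (j > i)              /* run of exactly two */
            printf("%d,%d", v[i], v[j]);
        else
            printf("%d", v[i]);
        i = j + 1;
    }
    putchar('\n');
}

int main(void)
{
    int pages[] = { 1, 2, 3, 5, 7, 8, 9, 12 };

    print_condensed(pages, 8); /* prints 1-3,5,7-9,12 */
    return 0;
}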
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
    __free_dma_pages((unsigned long)vaddr, get_pages(size));
}
static int do_gipc_send(struct task_struct *task, struct gipc_queue *gq,
                        struct gipc_send_buffer *gbuf,
                        unsigned long __user *uaddr,
                        unsigned long __user *ulen,
                        unsigned long *copied_pages)
{
    struct mm_struct *mm = task->mm;
    unsigned long addr, len, nr_pages;
    int rv, i;

    DEBUG("GIPC_SEND uaddr = %p, ulen = %p\n", uaddr, ulen);

    rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
    if (rv) {
        printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", uaddr);
        return -EFAULT;
    }

    rv = copy_from_user(&len, ulen, sizeof(unsigned long));
    if (rv) {
        printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", ulen);
        return -EFAULT;
    }

    if (addr > addr + len) {
        printk(KERN_ALERT "Graphene SEND: attempt to send %p - %p "
               " by thread %d FAIL: bad argument\n",
               (void *) addr, (void *) (addr + len), task->pid);
        return -EINVAL;
    }

    DEBUG("GIPC_SEND addr = %lx, len = %ld\n", addr, len);

    nr_pages = len >> PAGE_SHIFT;

    if (!access_ok(VERIFY_READ, addr, len)) {
        printk(KERN_ALERT "Graphene SEND:"
               " attempt to send %p - %p (%ld pages) "
               " by thread %d FAIL: bad permission\n",
               (void *) addr, (void *) (addr + len),
               nr_pages, task->pid);
        return -EFAULT;
    }

    DEBUG(" %p - %p (%ld pages) sent by thread %d\n",
          (void *) addr, (void *) (addr + len), nr_pages, task->pid);

    while (nr_pages) {
        unsigned long nr =
            (nr_pages <= PAGE_QUEUE) ? nr_pages : PAGE_QUEUE;

        /* for each of these addresses - check if
         * demand faulting will be triggered
         * if vma is present, but there is no page
         * present(pmd/pud not present or PTE_PRESENT
         * is off) then get_user_pages will trigger
         * the creation of those */
        down_write(&mm->mmap_sem);
        fill_page_bit_map(mm, addr, nr, gbuf->page_bit_map);
        rv = get_pages(task, addr, nr,
                       gbuf->page_bit_map,
                       gbuf->pages,
                       gbuf->vmas);
        if (rv < 0) {
            up_write(&mm->mmap_sem);
            break;
        }

        for (i = 0; i < nr; i++) {
            BUG_ON((!gbuf->vmas[i]) && (!!gbuf->pages[i]));

            if (gbuf->vmas[i] && gbuf->vmas[i]->vm_file) {
                gbuf->files[i] = get_file(gbuf->vmas[i]->vm_file);
                gbuf->pgoffs[i] =
                    ((addr - gbuf->vmas[i]->vm_start) >> PAGE_SHIFT)
                    + gbuf->vmas[i]->vm_pgoff;
            } else {
                gbuf->files[i] = NULL;
                gbuf->pgoffs[i] = 0;
            }

            addr += PAGE_SIZE;
        }
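The send loop above walks the user region in batches of at most PAGE_QUEUE pages. On its own, the batching arithmetic looks like the sketch below; the page size and PAGE_QUEUE value are assumed for illustration and are not taken from the Graphene module.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4-KiB pages */
#define PAGE_QUEUE 16   /* assumed batch size */

/* Illustrative only: walk a region in batches of at most PAGE_QUEUE
 * pages, the same shape as the send loop in do_gipc_send. */
static void walk_batches(unsigned long addr, unsigned long len)
{
    unsigned long nr_pages = len >> PAGE_SHIFT;

    while (nr_pages) {
        unsigned long nr = (nr_pages <= PAGE_QUEUE) ? nr_pages : PAGE_QUEUE;

        printf("batch at %#lx: %lu pages\n", addr, nr);
        addr += nr << PAGE_SHIFT;
        nr_pages -= nr;
    }
}

int main(void)
{
    walk_batches(0x100000, 40UL << PAGE_SHIFT); /* 40 pages -> 16 + 16 + 8 */
    return 0;
}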