int m4u_v2p_new(unsigned int va)
{
	unsigned int pmdOffset = (va & (PMD_SIZE - 1));
	unsigned int pageOffset = (va & (PAGE_SIZE - 1));
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pa;

	printk("Enter m4u_v2p_new()! va=0x%x\n", va);

	pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
	printk("m4u_v2p_new(), pgd %p\n", pgd);
	printk("pgd_none=%d, pgd_bad=%d\n", pgd_none(*pgd), pgd_bad(*pgd));
	if (pgd_none(*pgd) || pgd_bad(*pgd)) {
		printk("Error: m4u_v2p_new(), virtual addr 0x%x, pgd invalid!\n", va);
		return 0;
	}

	pmd = pmd_offset(pgd, va);
	printk("m4u_v2p_new(), pmd %p\n", pmd);
	printk("pmd_none=%d, pmd_bad=%d, pmd_val=0x%x\n",
	       pmd_none(*pmd), pmd_bad(*pmd), (unsigned int)pmd_val(*pmd));

	/* If this is a page-table entry, keep walking to the next level. */
	if (((unsigned int)pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			printk("Error: m4u_v2p_new(), virtual addr 0x%x, pmd invalid!\n", va);
			return 0;
		}
		pte = pte_offset_map(pmd, va);
		printk("m4u_v2p_new(), pte %p\n", pte);
		if (pte_present(*pte)) {
			pa = (pte_val(*pte) & PAGE_MASK) | pageOffset;
			pte_unmap(pte);
			printk("PA = 0x%8x\n", pa);
			return pa;
		}
		pte_unmap(pte);
	} else {	/* Only a 1-level (section) mapping */
		if (pmd_none(*pmd)) {
			printk("Error: m4u_v2p_new(), virtual addr 0x%x, pmd invalid!\n", va);
			return 0;
		}
		pa = (pmd_val(*pmd) & PMD_MASK) | pmdOffset;
		printk("PA = 0x%8x\n", pa);
		return pa;
	}
	return 0;
}
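/*
 * A minimal, self-contained userspace analogue of the kernel-side walk
 * above: translate one of our own virtual addresses to a physical address
 * via Linux's /proc/self/pagemap interface.  This is a sketch, not part of
 * the original driver; it assumes a kernel with pagemap enabled, and the
 * PFN field is zeroed for non-root callers on modern kernels.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	int x = 42;			/* something mapped in our own space */
	uintptr_t va = (uintptr_t)&x;
	uint64_t entry;
	FILE *f = fopen("/proc/self/pagemap", "rb");

	if (!f)
		return 1;
	/* pagemap holds one 64-bit entry per virtual page */
	fseek(f, (long)((va / page_size) * sizeof(entry)), SEEK_SET);
	if (fread(&entry, sizeof(entry), 1, f) != 1)
		return 1;
	fclose(f);

	if (entry & (1ULL << 63)) {	/* bit 63: page present */
		uint64_t pfn = entry & ((1ULL << 55) - 1);	/* bits 0-54: PFN */
		printf("va 0x%lx -> pa 0x%llx\n", (unsigned long)va,
		       (unsigned long long)(pfn * page_size + va % page_size));
	} else {
		printf("page not present\n");
	}
	return 0;
}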
/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir));
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		printk("bad page table middle entry %p:[%lx]\n", page_middle, pmd_val(*page_middle));
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	address &= ~PAGE_MASK;
	address += pte_page(pte);
	if (address >= high_memory)
		return;
	memset((void *) address, 0, PAGE_SIZE - (address & ~PAGE_MASK));
	flush_page_to_ram(pte_page(pte));
}
static inline void zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("zap_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = *pte;
		pte++;
		size--;
		if (pte_none(page))
			continue;
		pte_clear(pte-1);
		free_pte(page);
	}
}
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	struct pte_chain * pte_chain = NULL;
	pte_t *pte, *mapping;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	mapping = pte = pte_offset_map(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/*
		 * FIXME: handle pte_chain_alloc() failures
		 */
		if (pte_chain == NULL)
			pte_chain = pte_chain_alloc(GFP_ATOMIC);
		unuse_pte(vma, offset+address-vma->vm_start,
			  pte, entry, page, &pte_chain);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(mapping);
	pte_chain_free(pte_chain);
}
static inline void remove_mapping_pte_range (pmd_t *pmd, unsigned long address,
					     unsigned long size)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none (*pmd))
		return;
	if (pmd_bad (*pmd)) {
		printk ("remove_mapping_pte_range: bad pmd (%08lx)\n", pmd_val (*pmd));
		pmd_clear (pmd);
		return;
	}
	pte = pte_offset (pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;
		if (pte_present (entry))
			set_pte (pte, pte_modify (entry, PAGE_NONE));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
/*
 * fault_is_priv()
 *	Return true if the fault is a privilege violation.
 */
STATIC int fault_is_priv(struct pt_regs *regs, unsigned long missqw0, unsigned long missqw1)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	unsigned long address;

	/*
	 * Check if this is user or kernel in pt_regs/CSR.
	 */
	pgd = (pgd_t *)(MMU_MISSQW0_PGD_GET(missqw0) << MMU_MISSQW0_PGD_SHIFT);
	address = (unsigned long)(MMU_MISSQW1_VPN_GET(missqw1) << MMU_VPN_SHIFT);
	pmd = (pmd_t *)__pgd_offset(pgd, address);
	if (unlikely(pmd_none(*pmd)) || unlikely(pmd_bad(*pmd)))
		return 0;

	ptep = pte_offset_map(pmd, address);
	if (unlikely(pte_none(*ptep)) || unlikely(pte_bad(*ptep))) {
		pte_unmap(ptep);
		return 0;
	}

	/*
	 * If the PTE is a supervisory PTE and we are in user_mode()
	 * declare this as a privilege violation.
	 */
	if (user_mode(regs) && ((pte_val(*ptep) & L_PTE_USER) == 0)) {
		pte_unmap(ptep);
		return 1;
	}
	pte_unmap(ptep);
	return 0;
}
u32 imm_get_physical(void *v, u32 immid)
{
	pmd_t *pmd;
	pte_t *pte;
	pgd_t *pgd;
	u32 val = 0, virtual = (u32)v;
	struct mm_struct *mm;

	if (IMMID_USER(immid))
		mm = current->mm;
	else
		mm = &init_mm;

	pgd = pgd_offset(mm, virtual);
	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		/* 1st level entry pointer */
		pmd = pmd_offset(pgd, virtual);
		if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
			/* 2nd level entry pointer */
			pte = pte_offset_kernel(pmd, virtual);
			if (pte) {
				/* the hardware PTE sits 2048 bytes below the
				 * Linux PTE in this page-table layout */
				val = (*(u32 *)((u32)pte - 2048)) & PAGE_MASK;
				val += virtual % PAGE_SIZE;
			}
		} else if (!pmd_none(*pmd)) {
static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd,
	unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	freed = 0;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = ptep_get_and_clear(pte);
		pte++;
		size--;
		if (pte_none(page))
			continue;
		freed += free_pte(page);
	}
	return freed;
}
static struct page* my_follow_page(struct vm_area_struct *vma, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page = NULL;
	struct mm_struct *mm = current->mm;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		goto out;
	if (!pte_present(*pte))
		goto unlock;

	page = pfn_to_page(pte_pfn(*pte));
	if (!page)
		goto unlock;
	get_page(page);
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return page;
}
static int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pte_t *pte;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset_map(pmd, address);
	if ((address & PMD_MASK) != (end & PMD_MASK))
		end = (address & PMD_MASK) + PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(pte - 1);
	return error;
}
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
static unsigned long get_phys_addr(struct task_struct * p, unsigned long ptr)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;

	if (!p || !p->mm || ptr >= TASK_SIZE)
		return 0;
	page_dir = pgd_offset(p->mm, ptr);
	if (pgd_none(*page_dir))
		return 0;
	if (pgd_bad(*page_dir)) {
		printk("bad page directory entry %08lx\n", pgd_val(*page_dir));
		pgd_clear(page_dir);
		return 0;
	}
	page_middle = pmd_offset(page_dir, ptr);
	if (pmd_none(*page_middle))
		return 0;
	if (pmd_bad(*page_middle)) {
		printk("bad page middle entry %08lx\n", pmd_val(*page_middle));
		pmd_clear(page_middle);
		return 0;
	}
	pte = *pte_offset(page_middle, ptr);
	if (!pte_present(pte))
		return 0;
	return pte_page(pte) + (ptr & ~PAGE_MASK);
}
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}
static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	unsigned int type, unsigned long page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return 0;
	if (pmd_bad(*dir)) {
		printk("unuse_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
		pmd_clear(dir);
		return 0;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page))
			return 1;
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = (pmd_t *)pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pte_offset(pmd, 0);
	pmd_clear(pmd);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
static inline void unswap_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	unsigned long entry, unsigned long page /* , int isswap */)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		printk("unswap_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unswap_pte(vma, offset+address-vma->vm_start, pte, entry,
			   page /* , isswap */);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	while (address < end) {
		pte_t page = *pte;
		pte_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			free_page(pte_page(page));
			continue;
		}
		printk("Whee.. Swapped out page in kernel page table\n");
	}
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd, 2);
}
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
/*
 * Do a quick page-table lookup for a single page.
 */
static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		goto out;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		goto out;

	ptep = pte_offset(pmd, address);
	if (!ptep)
		goto out;

	pte = *ptep;
	if (pte_present(pte)) {
		if (!write || (pte_write(pte) && pte_dirty(pte)))
			return pte_page(pte);
	}

out:
	return NULL;
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);
	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(&init_mm, vaddr, ptep);
}
static int pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
		     !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
static inline int copy_pte_range(pmd_t *dst_pmd, pmd_t *src_pmd,
	unsigned long address, unsigned long size, int cow)
{
	pte_t * src_pte, * dst_pte;
	unsigned long end;

	if (pmd_none(*src_pmd))
		return 0;
	if (pmd_bad(*src_pmd)) {
		printk("copy_pte_range: bad pmd (%08lx)\n", pmd_val(*src_pmd));
		pmd_clear(src_pmd);
		return 0;
	}
	src_pte = pte_offset(src_pmd, address);
	if (pmd_none(*dst_pmd)) {
		if (!pte_alloc(dst_pmd, 0))
			return -ENOMEM;
	}
	dst_pte = pte_offset(dst_pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end >= PMD_SIZE)
		end = PMD_SIZE;
	do {
		/* I would like to switch arguments here, to make it
		 * consistent with copy_xxx_range and memcpy syntax. */
		copy_one_pte(src_pte++, dst_pte++, cow);
		address += PAGE_SIZE;
	} while (address < end);
	return 0;
}
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma,
	pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
{
	pte_t * pte;
	unsigned long pmd_end;

	if (pmd_none(*dir))
		return 0;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return 0;
	}

	pte = pte_offset(dir, address);

	pmd_end = (address + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end)
		end = pmd_end;

	do {
		int result;
		mm->swap_address = address + PAGE_SIZE;
		result = try_to_swap_out(mm, vma, address, pte, gfp_mask);
		if (result)
			return result;
		if (!mm->swap_cnt)
			return 0;
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return 0;
}
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline void iterate_pte(pmd_t * pmd, unsigned long address,
	unsigned long size, pte_iterator_t op, unsigned long arg)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		op(pte, arg);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
/*
 * This routine handles present pages, when users try to write
 * to a shared page.
 */
void do_wp_page(struct vm_area_struct *vma, unsigned long address,
		int write_access)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *page_table, pte;
	unsigned long old_page = 0, new_page;

	new_page = get_free_page(GFP_KERNEL);
	pgd = pgd_offset(vma->vm_task, address);
	if (pgd_none(*pgd))
		goto end_wp_page;
	if (pgd_bad(*pgd))
		goto bad_wp_page;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto end_wp_page;
	if (pmd_bad(*pmd))
		goto bad_wp_page;
	page_table = pte_offset(pmd, address);
	pte = *page_table;
	if (!pte_present(pte))
		goto end_wp_page;
	if (pte_write(pte))
		goto end_wp_page;
	old_page = pte_page(pte);
	if (old_page >= main_memory_end)
		goto bad_wp_page;
	(vma->vm_task->mm->min_flt)++;
	if (mem_map[MAP_NR(old_page)].flags & PAGE_PRESENT) {
		if (new_page) {
			if (mem_map[MAP_NR(old_page)].flags & MAP_PAGE_RESERVED)
				++(vma->vm_task->mm->rss);
			copy_page(old_page, new_page);
			*page_table = pte_mkwrite(pte_mkdirty(
				mk_pte(new_page, vma->vm_page_prot)));
			free_page(old_page);
			return;
		}
		pte_val(*page_table) &= PAGE_BAD;
		free_page(old_page);
		oom();
		return;
	}
	*page_table = pte_mkdirty(pte_mkwrite(pte));
	if (new_page)
		free_page(new_page);
	return;

bad_wp_page:
	printk("do_wp_page: bogus page at address %08lx (%08lx)\n",
	       address, old_page);
	goto end_wp_page;
end_wp_page:
	if (new_page)
		free_page(new_page);
	return;
}
static int mem_write(struct inode * inode, struct file * file, char * buf, int count)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	int i;

	if (count < 0)
		return -EINVAL;
	addr = file->f_pos;
	tsk = get_task(inode->i_ino >> 16);
	if (!tsk)
		return -ESRCH;
	tmp = buf;
	while (count > 0) {
		if (current->signal & ~current->blocked)
			break;
		page_dir = pgd_offset(tsk, addr);
		if (pgd_none(*page_dir))
			break;
		if (pgd_bad(*page_dir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
			pgd_clear(page_dir);
			break;
		}
		page_middle = pmd_offset(page_dir, addr);
		if (pmd_none(*page_middle))
			break;
		if (pmd_bad(*page_middle)) {
			printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
			pmd_clear(page_middle);
			break;
		}
		pte = *pte_offset(page_middle, addr);
		if (!pte_present(pte))
			break;
		if (!pte_write(pte))
			break;
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE - (addr & ~PAGE_MASK);
		if (i > count)
			i = count;
		memcpy_fromfs(page, tmp, i);
		addr += i;
		tmp += i;
		count -= i;
	}
	file->f_pos = addr;
	if (tmp != buf)
		return tmp - buf;
	if (current->signal & ~current->blocked)
		return -ERESTARTSYS;
	return 0;
}