Example #1
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn, end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
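		/* A free buddy page of order N covers 1 << N contiguous pfns,
		 * so skip the whole block at once. */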
		if (PageBuddy(page))
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE) {
			pfn += 1;
			printk(KERN_INFO "%s:%d ", __func__, __LINE__);
			dump_page(page);
		} else {
			printk(KERN_INFO "%s:%d ", __func__, __LINE__);
			dump_page(page);
			break;
		}
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
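Example #2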
static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
{
	MALI_DEBUG_ASSERT_POINTER(pagedir);
	MALI_DEBUG_ASSERT_POINTER(info);

	if (NULL != pagedir->page_directory_mapped)
	{
		int i;

		MALI_CHECK_NO_ERROR(
			dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
			);

		/* A Mali page directory holds 1024 entries; dump the page
		 * table behind every mapped entry, masking off the MMU flag
		 * bits to recover its physical address. */
		for (i = 0; i < 1024; i++)
		{
			if (NULL != pagedir->page_entries_mapped[i])
			{
				MALI_CHECK_NO_ERROR(
				    dump_page(pagedir->page_entries_mapped[i],
				        _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				        i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
				);
			}
		}
	}

	MALI_SUCCESS;
}
Example #3
static void print_address_description(struct kasan_access_info *info)
{
	const void *addr = info->access_addr;

	/*
	 * Addresses in [PAGE_OFFSET, high_memory) lie in the kernel's
	 * linear mapping and can be resolved to a struct page.
	 */
	if ((addr >= (void *)PAGE_OFFSET) &&
		(addr < high_memory)) {
		struct page *page = virt_to_head_page(addr);

		if (PageSlab(page)) {
			void *object;
			struct kmem_cache *cache = page->slab_cache;
			object = nearest_obj(cache, page,
						(void *)info->access_addr);
			kasan_object_err(cache, object);
			return;
		}
		dump_page(page, "kasan: bad access detected");
	}

	if (kernel_or_module_addr(addr)) {
		if (!init_task_stack_addr(addr))
			pr_err("Address belongs to variable %pS\n", addr);
	}
	dump_stack();
}
Example #4
static void print_address_description(struct kasan_access_info *info)
{
	const void *addr = info->access_addr;

	if ((addr >= (void *)PAGE_OFFSET) &&
		(addr < high_memory)) {
		struct page *page = virt_to_head_page(addr);

		if (PageSlab(page)) {
			void *object;
			struct kmem_cache *cache = page->slab_cache;
			void *last_object;

			object = virt_to_obj(cache, page_address(page), addr);
			/* First byte past the final object in the slab. */
			last_object = page_address(page) +
				page->objects * cache->size;

			if (unlikely(object > last_object))
				object = last_object; /* we hit into padding */

			object_err(cache, page, object,
				"kasan: bad access detected");
			return;
		}
		dump_page(page, "kasan: bad access detected");
	}

	if (kernel_or_module_addr(addr)) {
		if (!init_task_stack_addr(addr))
			pr_err("Address belongs to variable %pS\n", addr);
	}

	dump_stack();
}
Example #5
static int
test_map_dump(int argc, char *argv[])
{
    DOMAIN_ID domid;
    void *map;
    GRANT_MAP_HANDLE handle;
    ALIEN_GRANT_REF gref;

    UNREFERENCED_PARAMETER(argc);

    domid = wrap_DOMAIN_ID(atoi(argv[0]));
    gref = wrap_ALIEN_GRANT_REF(atoi(argv[1]));

    if (!xenops_grant_map_readonly(xenops,
                                   domid,
                                   1,
                                   &gref,
                                   &handle,
                                   &map))
        xs_win_err(1, &xs_render_error_stderr,
                   "mapping %d::%d readonly",
                   unwrap_DOMAIN_ID(domid),
                   unwrap_ALIEN_GRANT_REF(gref));
    printf("Mapped to %p\n", map);

    dump_page(map);

    xenops_unmap_grant(xenops, handle);

    return 0;
}
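Example #6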
static void dump_handler(int pid, void *start_of_page) {
  debug_print("Dump handler detected access at %p\n", start_of_page);

  /* Unprotect Page */
  unprotect_i(pid, start_of_page, PAGESIZE);
  dump_page(pid, start_of_page);
}
Example #7
static int
test_map_read_after_unmap(int argc, char *argv[])
{
    DWORD code;
    DOMAIN_ID domid;
    ALIEN_GRANT_REF gref;
    void *map;
    GRANT_MAP_HANDLE handle;

    UNREFERENCED_PARAMETER(argc);

    domid = wrap_DOMAIN_ID(atoi(argv[0]));
    gref = wrap_ALIEN_GRANT_REF(atoi(argv[1]));

    if (!xenops_grant_map_readonly(xenops,
                                   domid,
                                   1,
                                   &gref,
                                   &handle,
                                   &map))
        xs_win_err(1, &xs_render_error_stderr,
                   "mapping %d::%d readonly",
                   unwrap_DOMAIN_ID(domid),
                   unwrap_ALIEN_GRANT_REF(gref));
    xenops_unmap_grant(xenops, handle);

    code = 0xf001dead;
    __try {
        dump_page(map);
    } __except (code = GetExceptionCode(), EXCEPTION_EXECUTE_HANDLER) {
        printf("Exception %x reading from unmapped memory\n",
               code);
    }

    return 0;
}
Example #8
static int
test_offer_write(int argc, char *argv[])
{
    DOMAIN_ID domid;
    unsigned delay;
    GRANT_REF gref;
    void *buffer;

    UNREFERENCED_PARAMETER(argc);

    domid = wrap_DOMAIN_ID(atoi(argv[0]));
    delay = atoi(argv[1]);

    buffer = allocate_page();
    memset(buffer, 0, PAGE_SIZE);
    printf("buffer at %p\n", buffer);
    if (!xenops_grant_readwrite(xenops, domid, buffer, &gref))
        xs_win_err(1, &xs_render_error_stderr,
                   "performing grant operation");
    printf("grant with reference %d\n", xen_GRANT_REF(gref));
    if (delay == 0) {
        while (1)
            Sleep(INFINITE);
    } else {
        Sleep(delay * 1000);
        if (!xenops_ungrant(xenops, gref))
            xs_win_err(1, &xs_render_error_stderr,
                       "revoking grant after %d seconds", delay);
    }
    dump_page(buffer);

    return 0;
}
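Example #9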
static void dump_unprotected_pages(pid_t pid) {
  debug_print("%s\n", "START DUMP UNPROTECTED PAGE");

  /* Dump the unprotected pages before entering the codelet region. */
  for (int i = 0; i < log_size; i++) {
    int c = (i + last_page) % log_size;
    if (pages_cache[c] != 0) {
      dump_page(pid, pages_cache[c]);
    }
  }

  debug_print("%s\n", "END DUMP UNPROTECTED PAGE");
}
Example #10
int
main(int argc, char **argv)
{
	_cleanup_close_ int fd = 0;
	const char *path;
	size_t page = 0, offset = 0;
	struct hidpp20_device *dev = NULL;
	struct hidpp_device base;
	struct hidpp20_onboard_profiles_info info = { 0 };
	int rc;

	if (argc < 2 || argc > 4) {
		usage();
		return 1;
	}

	/* The device path is always the last argument; the optional page
	 * and offset arguments come before it. */
	path = argv[argc - 1];
	fd = open(path, O_RDWR);
	if (fd < 0)
		error(1, errno, "Failed to open path %s", path);

	hidpp_device_init(&base, fd);
	dev = hidpp20_device_new(&base, 0xff);
	if (!dev)
		error(1, 0, "Failed to open %s as a HID++ 2.0 device", path);

	hidpp20_onboard_profiles_get_profiles_desc(dev, &info);

	if (argc == 2)
		rc = dump_everything(dev, info.sector_size);
	else {
		page = atoi(argv[1]);
		if (argc > 3)
			offset = atoi(argv[2]);
		rc = dump_page(dev, info.sector_size, 0, page, offset);
	}

	hidpp20_device_destroy(dev);

	return rc;
}
Example #11
static int
test_map_write_readonly(int argc, char *argv[])
{
    DOMAIN_ID domid;
    ALIEN_GRANT_REF gref;
    void *map;
    GRANT_MAP_HANDLE handle;
    unsigned x;

    UNREFERENCED_PARAMETER(argc);

    domid = wrap_DOMAIN_ID(atoi(argv[0]));
    gref = wrap_ALIEN_GRANT_REF(atoi(argv[1]));

    if (!xenops_grant_map_readonly(xenops,
                                   domid,
                                   1,
                                   &gref,
                                   &handle,
                                   &map))
        xs_win_err(1, &xs_render_error_stderr,
                   "mapping %d::%d readonly",
                   unwrap_DOMAIN_ID(domid),
                   unwrap_ALIEN_GRANT_REF(gref));
    printf("Mapped to %p\n", map);

    /* Deliberately write through the read-only mapping to see how the
     * platform reacts to stores into a page granted read-only. */
    for (x = 0; x < PAGE_SIZE; x++)
        ((unsigned char *)map)[x] = (unsigned char)(x + 5);

    printf("Completed write phase.\n");

    dump_page(map);

    xenops_unmap_grant(xenops, handle);

    printf("Performed unmap.\n");

    return 0;
}
Example #12
static inline int
dump_all_pages(struct hidpp20_device *dev, uint16_t sector_size, uint8_t rom)
{
	uint8_t page;
	int rc = 0;

	for (page = 0; page < 31; page++) {
		rc = dump_page(dev, sector_size, rom, page, 0);
		if (rc != 0)
			break;
	}

	/* We dumped at least one page successfully and then got ENOENT, so
	 * we ran past the last page. Overwrite the last line with a blank
	 * one so it doesn't look like an error. */
	if (page > 0 && rc == ENOENT) {
		hidpp_log_info(&dev->base, "\r                                   \n");
		rc = 0;
	}

	return rc;
}
Example #13
static void print_address_description(void *addr)
{
	struct page *page = addr_to_page(addr);

	dump_stack();
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);

		describe_object(cache, object, addr);
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}
}
Example #14
static int32_t _write(spiffs *fs, uint32_t addr, uint32_t size, uint8_t *src) {
  int i;
  //printf("wr %08x %i\n", addr, size);
  if (log_flash_ops) {
    bytes_wr += size;
    writes++;
    if (error_after_bytes_written > 0 && bytes_wr >= error_after_bytes_written) {
      if (error_after_bytes_written_once_only) {
        error_after_bytes_written = 0;
      }
      return SPIFFS_ERR_TEST;
    }
  }

  if (addr < __fs.cfg.phys_addr) {
    printf("FATAL write addr too low %08x < %08x\n", addr, SPIFFS_PHYS_ADDR);
    exit(0);
  }
  if (addr + size > __fs.cfg.phys_addr + __fs.cfg.phys_size) {
    printf("FATAL write addr too high %08x + %08x > %08x\n", addr, size, SPIFFS_PHYS_ADDR + SPIFFS_FLASH_SIZE);
    exit(0);
  }

  /* Emulate NOR flash semantics: a write can only clear bits (1 -> 0).
   * Flag any attempt to set a cleared bit, except in the page header's
   * flags field. */
  for (i = 0; i < size; i++) {
    if (((addr + i) & (__fs.cfg.log_page_size-1)) != offsetof(spiffs_page_header, flags)) {
      if (check_valid_flash && ((AREA(addr + i) ^ src[i]) & src[i])) {
        printf("trying to write %02x to %02x at addr %08x\n", src[i], AREA(addr + i), addr+i);
        spiffs_page_ix pix = (addr + i) / LOG_PAGE;
        dump_page(&__fs, pix);
        return -1;
      }
    }
    AREA(addr + i) &= src[i];
  }
  return 0;
}
Example #15
int main(int argc,char *argv[]){
  int c,long_option_index;
  int eof=0;
  int vorbiscount=0;

  /* get options */
  while((c=getopt_long(argc,argv,optstring,options,&long_option_index))!=EOF){
    switch(c){
    case 'c':
      codebook_p=1;
      break;
    case 'g':
      pageinfo_p=1;
      break;
    case 'h':
      usage(stdout);
      exit(0);
    case 'H':
      headerinfo_p=1;
      break;
    case 'p':
      packetinfo_p=1;
      break;
    case 's':
      streaminfo_p=1;
      break;
    case 't':
      truncpacket_p=1;
      break;
    case 'v':
      codebook_p=1;
      pageinfo_p=1;
      headerinfo_p=1;
      packetinfo_p=1;
      streaminfo_p=1;
      truncpacket_p=1;
      warn_p=1;
      break;
    case 'w':
      warn_p=1;
      break;
    default:
      usage(stderr);
      exit(1);
    }
  }

  /* set up sync */
  
  oy=ogg2_sync_create(); 
  os=ogg2_stream_create(0);


  while(!eof){
    long ret;
    long garbagecounter=0;
    long pagecounter=0;
    long packetcounter=0;
    int initialphase=0;

    memset(&vi,0,sizeof(vi));

    /* packet parsing loop */
    while(1){
      
      /* is there a packet available? */
      if(ogg2_stream_packetout(os,&op)>0){
	/* yes, process it */

	if(packetcounter<3){
	  /* header packet */
	  ret=vorbis_info_headerin(&vi,&op);
	  if(ret){
	    switch(packetcounter){
	    case 0: /* initial header packet */
	      if((streaminfo_p || warn_p || headerinfo_p) && syncp)
		printf("WARN stream: page did not contain a valid Vorbis I "
		       "identification\n"
		       "             header. Stream is not decodable as "
		       "Vorbis I.\n\n");
	      break;
	    case 1:
	      if((streaminfo_p || warn_p || headerinfo_p) && syncp)
		printf("WARN stream: next packet is not a valid Vorbis I "
		       "comment header as expected.\n"
		       "             Stream is not decodable as "
		       "Vorbis I.\n\n");
	      break;
	    case 2:
	      if((streaminfo_p || warn_p || headerinfo_p) && syncp)
		printf("WARN stream: next packet is not a valid Vorbis I "
		       "setup header as expected.\n"
		       "             Stream is not decodable as "
		       "Vorbis I.\n\n");
	      
	      break;
	    }
	    syncp=0;
	  }else{
	    syncp=1;
	    packetcounter++;
	    if(packetcounter==3){
	      if(streaminfo_p || headerinfo_p)
		printf("info stream: Vorbis I header triad parsed successfully.\n\n");
	      vorbiscount++;
	    }
	  }
	}else{
	  /* audio packet */

	  vorbis_decode(&vi,&op);
	  packetcounter++;
	}
	continue;
      }

      /* is there a page available? */
	  
      ret=ogg2_sync_pageseek(oy,&og);
      if(ret<0){
	garbagecounter-=ret;
      }
      if(ret>0){
	/* Garbage between pages? */
	if(garbagecounter){
	  if(streaminfo_p || warn_p || pageinfo_p)
	    fprintf(stdout,"WARN stream: %ld bytes of garbage before page %ld\n\n",
		    garbagecounter,pagecounter);
	  garbagecounter=0;
	}

	if(initialphase && !ogg2_page_bos(&og)){
	  /* initial header pages phase has ended */
	  if(streaminfo_p || headerinfo_p){
	    printf("info stream: All identification header pages parsed.\n"
		   "             %d logical stream%s muxed in this link.\n\n",
		   initialphase,(initialphase==1?"":"s"));
	    if(initialphase>1 && (warn_p || streaminfo_p))
	      printf("WARN stream: A 'Vorbis I audio stream' must contain uninterleaved\n"
		     "             Vorbis I logical streams only.  This is a legal\n"
		     "             multimedia Ogg stream, but not a Vorbis I audio\n"
		     "             stream.\n\n");
	  }
	  initialphase=0;
	}

	if(pageinfo_p)dump_page(&og);


	/* is this a stream transition? */
	if(ogg2_page_bos(&og) || pagecounter==0){
	  if(initialphase){
	    /* we're in a muxed stream, which is illegal for Vorbis I
               audio-only, but perfectly legal for Ogg. */
	    if(!syncp){
	      /* we've not yet seen the Vorbis header go past; keep trying new streams */
	      ogg2_stream_reset_serialno(os,ogg2_page_serialno(&og));
	    }
	  }else{
	    /* first new packet, signals new stream link.  Dump the current vorbis stream, if any */
	    ogg2_stream_reset_serialno(os,ogg2_page_serialno(&og));
	    vorbis_info_clear(&vi);
	    memset(&vi,0,sizeof(vi));
	    packetcounter=0;
	  }
	  initialphase++;

	  /* got an initial page.  Is it beginning of stream? */
	  if(!ogg2_page_bos(&og) && pagecounter==0 && streaminfo_p)
	    fprintf(stdout,"WARN stream: first page (0) is not marked beginning of stream.\n\n");
	}	
	
	ogg2_stream_pagein(os,&og);
	pagecounter++;
	continue;
      }
    
      if(get_data()<=0){
	eof=1;
	break;
      }
    }
  }

  if(streaminfo_p)
    fprintf(stdout, "\nHit physical end of stream at %ld bytes.\n",
	    ftell(stdin));

  if(vorbiscount==0)
    fprintf(stdout,"No logical Vorbis streams found in data.\n");
  else
    fprintf(stdout,"%d logical Vorbis stream%s found in data.\n",
	    vorbiscount,(vorbiscount==1?"":"s"));
  
  fprintf(stdout,"Done.\n");
  
  ogg2_page_release(&og);
  ogg2_stream_destroy(os);
  ogg2_sync_destroy(oy);
  
  return 0;
}
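Example #16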
static void tracer_dump(pid_t pid) {

  /* Read arguments from tracee */
  handle_events_until_dump_trap(-1);
  register_t ret = get_arg_from_regs(pid);
  assert(ret == TRAP_START_ARGS);

  debug_print("receive string from tracee %d\n", pid);
  ptrace_getdata(pid, (long) tracer_buff->str_tmp, loop_name, SIZE_LOOP);
  ptrace_syscall(pid);

  invocation = (int)receive_from_tracee(pid);
  ptrace_syscall(pid);

  int arg_count = (int)receive_from_tracee(pid);
  ptrace_syscall(pid);

  printf("DUMP( %s %d count = %d) \n", loop_name, invocation, arg_count);

  /* Ensure that the dump directory exists */
  snprintf(dump_path, sizeof(dump_path), "%s/%s/%s", dump_prefix, dump_root,
           loop_name);

  mkdir(dump_path, 0777);

  snprintf(dump_path, sizeof(dump_path), "%s/%s/%s/%d", dump_prefix, dump_root,
           loop_name, invocation);

  if (mkdir(dump_path, 0777) != 0)
    errx(EXIT_FAILURE, "dump %s already exists, stop\n", dump_path);

  int i;
  void *addresses[arg_count];
  for (i = 0; i < arg_count; i++) {
    addresses[i] = (void *)receive_from_tracee(pid);
    ptrace_syscall(pid);
  }

  /* Wait for end of arguments sigtrap */
  handle_events_until_dump_trap(pid);
  ret = get_arg_from_regs(pid);
  assert(ret == TRAP_END_ARGS);

  /* Dump hotpages to disk */
  flush_hot_pages_trace_to_disk(pid);

  char lel_bin_path[1024];
  /* Link to the original binary */
  snprintf(lel_bin_path, sizeof(lel_bin_path), "%s/lel_bin", dump_path);
  int res =
      linkat(AT_FDCWD, "lel_bin", AT_FDCWD, lel_bin_path, AT_SYMLINK_FOLLOW);
  if (res == -1)
    errx(EXIT_FAILURE, "Error copying the dump binary\n");

  for (i = 0; i < arg_count; i++) {
    void *start_of_page = round_to_page(addresses[i]);
    if (start_of_page != NULL) {
      unprotect_i(pid, start_of_page, PAGESIZE);
      dump_page(pid, start_of_page);
    }
  }

  if (firsttouch_active) {
    dump_firsttouch();
  }

  dump_core(arg_count, addresses);
  dump_unprotected_pages(pid);
}
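Example #17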
int __m4u_get_user_pages(int eModuleID, struct task_struct *tsk, struct mm_struct *mm, 
                     unsigned long start, int nr_pages, unsigned int gup_flags,
                     struct page **pages, struct vm_area_struct **vmas)
{
        int i;
        unsigned long vm_flags;
	int trycnt;

        if (nr_pages <= 0)
                return 0;

        //VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
        if(!!pages != !!(gup_flags & FOLL_GET)) {
            M4UMSG(" error: __m4u_get_user_pages !!pages != !!(gup_flags & FOLL_GET), pages=0x%x, gup_flags & FOLL_GET=0x%x \n",
                    (unsigned int)pages, gup_flags & FOLL_GET);
        }

        /*   
         * Require read or write permissions.
         * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
        vm_flags  = (gup_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (gup_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0; 

        M4UDBG("Trying to get_user_pages from start vaddr 0x%08x with %d pages\n", start, nr_pages);

        do { 
                struct vm_area_struct *vma;
                M4UDBG("For a new vma area from 0x%08x\n", start);
                vma = find_extend_vma(mm, start);

                if (!vma)
                {
                    M4UMSG("error: the vma is not found, start=0x%x, module=%d \n", 
                           (unsigned int)start, eModuleID);
                    return i ? i : -EFAULT;
                } 
                if( ((~vma->vm_flags) & (VM_IO|VM_PFNMAP|VM_SHARED|VM_WRITE)) == 0 )
                {
                    M4UMSG("error: m4u_get_pages(): bypass pmem garbage pages! vma->vm_flags=0x%x, start=0x%x, module=%d \n", 
                            (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
                    return i ? i : -EFAULT;
                }                     
                if(vma->vm_flags & VM_IO)
                {
                	  M4UDBG("warning: vma is marked as VM_IO \n");
                }
                if(vma->vm_flags & VM_PFNMAP)
                {
                    M4UMSG("error: vma permission is not correct, vma->vm_flags=0x%x, start=0x%x, module=%d \n", 
                            (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
                    M4UMSG("hint: maybe the memory is remapped with un-permitted vma->vm_flags! \n");          
                    //m4u_dump_maps(start);
                    return i ? i : -EFAULT;
                }
                if(!(vm_flags & vma->vm_flags)) 
                {
                    M4UMSG("error: vm_flags invalid, vm_flags=0x%x, vma->vm_flags=0x%x, start=0x%x, module=%d \n", 
                           (unsigned int)vm_flags,
                           (unsigned int)(vma->vm_flags), 
                           (unsigned int)start,
                            eModuleID);
                    //m4u_dump_maps(start);                  
                    return i ? i : -EFAULT;
                }

                do {
                        struct page *page;
                        unsigned int foll_flags = gup_flags;
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
                         * pages and potentially allocating memory.
                         */
                        if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
                        MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
                        page = follow_page(vma, start, foll_flags);
                        MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
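                        /* follow_page() returned NULL: the page is not
                         * resident yet, so fault it in and retry until
                         * it is. */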
                        while (!page) {
                                int ret;

                                M4UDBG("Trying to allocate for %dth page(vaddr: 0x%08x)\n", i, start);
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FORCE_PAGING], MMProfileFlagStart, eModuleID, start&(~0xFFF));
                                ret = handle_mm_fault(mm, vma, start,
                                        (foll_flags & FOLL_WRITE) ?
                                        FAULT_FLAG_WRITE : 0);
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FORCE_PAGING], MMProfileFlagEnd, eModuleID, 0x1000);
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM) {
                                                M4UMSG("handle_mm_fault() error: no memory, aaddr:0x%08lx (%d pages are allocated), module=%d\n", 
                                                start, i, eModuleID);
                                                //m4u_dump_maps(start);
                                                return i ? i : -ENOMEM;
					                    }
                                        if (ret &
                                            (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) {
                                                M4UMSG("handle_mm_fault() error: invalide memory address, vaddr:0x%lx (%d pages are allocated), module=%d\n", 
                                                start, i, eModuleID);
                                                //m4u_dump_maps(start);
                                                return i ? i : -EFAULT;
					                    }
                                        BUG();
                                }
                                if (ret & VM_FAULT_MAJOR)
                                        tsk->maj_flt++;
                                else
                                        tsk->min_flt++;

                                /*
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
                                 * page lookups as if they were reads. But only
                                 * do so when looping for pte_write is futile:
                                 * in some cases userspace may also be wanting
                                 * to write to the gotten user page, which a
                                 * read fault here might prevent (a readonly
                                 * page might get reCOWed by userspace write).
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
                                page = follow_page(vma, start, foll_flags);
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
                        }
                        if (IS_ERR(page)) {
                                M4UMSG("handle_mm_fault() error: faulty page is returned, vaddr:0x%lx (%d pages are allocated), module=%d \n", 
                                        start, i, eModuleID);
                                //m4u_dump_maps(start);
                                return i ? i : PTR_ERR(page);
			            }
                        if (pages) {
                                pages[i] = page;
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				
				/* Use retry version to guarantee it will succeed in getting the lock */
				trycnt = 3000;
				do {
					if (trylock_page(page)) {
						mlock_vma_page(page);
						unlock_page(page);

						/* Make sure the hardware PTE is populated:
						 * retry handle_mm_fault() until the VA
						 * translates, up to 3000 attempts. */
						{
							int j;

							for (j = 0; j < 3000; j++) {
								if (!m4u_user_v2p(start)) {
									handle_mm_fault(mm, vma, start,
										(foll_flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
									cond_resched();
								} else {
									break;
								}
							}
							if (j == 3000)
								M4UMSG("error: cannot handle_mm_fault to get hw pte: va=0x%x\n", start);
						}

						break;
					}
				} while (trycnt-- > 0);

                                if(PageMlocked(page)==0)
                                {
                                    M4UMSG("Can't mlock page\n");
                                    dump_page(page);
                                }
                                else
                                {
                                    unsigned int pfn = page_to_pfn(page);
                                    if(pfn < mlock_cnt_size)
                                    {
                                        pMlock_cnt[page_to_pfn(page)]++;
                                    }
                                    else
                                    {
                                        M4UERR("mlock_cnt_size is too small: pfn=%d, size=%d\n", pfn, mlock_cnt_size);
                                    }
                                    
                                    //M4UMSG("lock page:\n");
                                    //dump_page(page);
                                }
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagEnd, eModuleID, 0x1000);

                        }
                        if (vmas)
                                vmas[i] = vma;
                        i++;
                        start += PAGE_SIZE;
                        nr_pages--;
                } while (nr_pages && start < vma->vm_end);
        } while (nr_pages);

        return i;
}