static inline struct page_descriptor *get_kmalloc_pages( unsigned long priority, unsigned long order, int dma ) { uint32 nPtr; int nRetryCount = 0; for ( ;; ) { nPtr = get_free_pages( /* priority, */ ( 1 << order ), dma | MEMF_CLEAR ); if ( ( priority & MEMF_NOBLOCK ) || nPtr != 0 ) { if ( nPtr != 0 ) { atomic_add( &g_sSysBase.ex_nKernelMemPages, ( 1 << order ) ); } break; } if ( shrink_caches( ( 1 << order ) * PAGE_SIZE ) == 0 && ( priority & MEMF_OKTOFAIL ) ) { break; } if ( nRetryCount++ > 100 ) { if ( nRetryCount % 10 == 0 ) { printk( "get_kmalloc_pages( %d ) retried %d times\n", ( 1 << order ), nRetryCount ); } } } return ( ( struct page_descriptor * )nPtr ); }
/*
 * Try to reclaim memory for an allocation in @classzone.
 *
 * Runs the cache shrinker at successively more aggressive priorities
 * (DEF_PRIORITY down to 1); each pass may lower the number of pages
 * still wanted.  Returns 1 as soon as the target is met.  If every
 * pass fails, invokes the OOM killer and returns 0.
 *
 * @classzone: zone the allocation is being made from.
 * @gfp_mask:  allocation flags; filtered through pf_gfp_mask() first.
 * @order:     allocation order (unused here, kept for the call signature).
 */
int try_to_free_pages(zone_t *classzone, unsigned int gfp_mask, unsigned int order)
{
	int scan_priority = DEF_PRIORITY;
	int remaining = SWAP_CLUSTER_MAX;

	gfp_mask = pf_gfp_mask(gfp_mask);

	do {
		remaining = shrink_caches(classzone, scan_priority, gfp_mask, remaining);
		if (remaining <= 0)
			return 1;
	} while (--scan_priority);

	/*
	 * Every shrink pass came up short — last resort is to kill a
	 * process and reclaim its memory.
	 */
	out_of_memory();
	return 0;
}
/*
 * Reclaim memory on behalf of @classzone.
 *
 * Makes up to vm_passes inner passes per outer iteration: each pass
 * shrinks the page caches, then the dentry/inode (and, with
 * CONFIG_QUOTA, dquot) caches, and attempts swap-out once regular
 * shrinking has failed.  Returns 1 as soon as shrink_caches() reports
 * the page target met (nr_pages <= 0), 0 otherwise.
 *
 * Fallback after all passes fail:
 *  - with CONFIG_OOM_KILLER: kill a process and retry the outer loop;
 *  - without it: give up (return 0) for everyone except init (pid 1),
 *    which yields the CPU and retries while the zone still needs
 *    balancing.
 */
int try_to_free_pages_zone(zone_t *classzone, unsigned int gfp_mask)
{
	/* Normalize the caller's flags against the current process state. */
	gfp_mask = pf_gfp_mask(gfp_mask);
	for (;;) {
		int tries = vm_passes;
		/* If the caller cannot do I/O, treat swap-out as already failed. */
		int failed_swapout = !(gfp_mask & __GFP_IO);
		int nr_pages = SWAP_CLUSTER_MAX;

		do {
			/* nr_pages counts down as pages are reclaimed —
			 * presumably shrink_caches() returns the shortfall;
			 * confirm against its definition. */
			nr_pages = shrink_caches(classzone, gfp_mask, nr_pages, &failed_swapout);
			if (nr_pages <= 0)
				return 1;

			/* Trim the VFS caches as well. */
			shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
			shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
			shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif
			/* Try swapping only until it fails once. */
			if (!failed_swapout)
				failed_swapout = !swap_out(classzone);
		} while (--tries);

#ifdef CONFIG_OOM_KILLER
		/* Reclaim failed: kill something, then retry the outer loop. */
		out_of_memory();
#else
		/* No OOM killer: ordinary tasks simply fail the allocation. */
		if (likely(current->pid != 1))
			break;
		if (!check_classzone_need_balance(classzone))
			break;

		/* init must not fail: yield and retry while the zone is short. */
		__set_current_state(TASK_RUNNING);
		yield();
#endif
	}
	return 0;
}