Example #1
0
/*
 * shm_swap - try to swap out one resident SysV shared-memory page.
 *
 * @prio:     swap aggressiveness; the attempt budget is shm_rss >> prio,
 *            so a higher prio means fewer pages are examined per call.
 * @gfp_mask: allocation constraints of the caller; if __GFP_DMA is set,
 *            only pages in the DMA zone are considered worth freeing.
 *
 * Returns 1 if a page was written to swap and freed, 0 otherwise.
 *
 * NOTE(review): swap_id/swap_idx are file-scope scan cursors — each call
 * appears to resume the segment/page scan where the previous call left
 * off (they are reset only on wrap/segment advance, never at entry).
 */
int shm_swap (int prio, int gfp_mask)
{
    pte_t page;
    struct shmid_kernel *shp;
    unsigned long swap_nr;
    unsigned long id, idx;
    int loop = 0;           /* set once the segment scan has wrapped past max_shmid */
    int counter;

    /* Budget of pages to examine; bail out early if there is nothing
     * resident or no free swap slot can be reserved. */
    counter = shm_rss >> prio;
    if (!counter || !(swap_nr = get_swap_page()))
        return 0;

check_id:
    /* Skip slots that hold no live segment, and segments locked in core. */
    shp = shm_segs[swap_id];
    if (shp == IPC_UNUSED || shp == IPC_NOID || shp->u.shm_perm.mode & SHM_LOCKED ) {
next_id:
        /* Advance to the next segment; restart its page index at 0.
         * Wrap around at most once — a second wrap means every segment
         * has been visited without success. */
        swap_idx = 0;
        if (++swap_id > max_shmid) {
            swap_id = 0;
            if (loop)
                goto failed;
            loop = 1;
        }
        goto check_id;
    }
    id = swap_id;

check_table:
    /* Next page slot in this segment; exhausted -> move to next segment. */
    idx = swap_idx++;
    if (idx >= shp->shm_npages)
        goto next_id;

    page = __pte(shp->shm_pages[idx]);
    if (!pte_present(page))         /* not resident (never faulted in or already swapped) */
        goto check_table;
    if ((gfp_mask & __GFP_DMA) && !PageDMA(&mem_map[MAP_NR(pte_page(page))]))
        goto check_table;           /* caller needs DMA memory; this page won't help */
    swap_attempts++;

    if (--counter < 0) { /* failed */
failed:
        /* Give back the reserved swap slot before reporting failure. */
        swap_free (swap_nr);
        return 0;
    }
    /* Only swap pages whose sole reference is this shm page table;
     * count != 1 means some process still has it mapped. */
    if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) != 1)
        goto check_table;
    /* Record the swap entry in place of the PTE, write the page out,
     * release the frame, and update the global accounting. */
    shp->shm_pages[idx] = swap_nr;
    rw_swap_page_nocache (WRITE, swap_nr, (char *) pte_page(page));
    free_page(pte_page(page));
    swap_successes++;
    shm_swp++;
    shm_rss--;
    return 1;
}
Example #2
0
/*
 * Jcmd_mem - console command: dump memory statistics.
 *
 * Prints, under the free-area lock, the length of each buddy free list
 * (order, block size, count, total pages, MB), then overall free/actual
 * page totals, then a backward census of the page_struct array counting
 * Reserved/DMA/Referenced/Slab pages.  "mem all" additionally dumps the
 * slab allocator state via Jcmd_jslab().
 *
 * @arg1: optional; "all" triggers the slab dump.
 * @arg2: unused (kept for the Jcmd_* command-handler signature).
 *
 * Returns 1 always.
 *
 * NOTE(review): ut_printf is given unsigned long arguments against %d
 * conversions throughout; this relies on ut_printf's own argument
 * handling — confirm it tolerates long-sized varargs on this ABI.
 */
int Jcmd_mem(char *arg1, char *arg2) {
	unsigned long order, flags;
	unsigned long total = 0;

	spin_lock_irqsave(&free_area_lock, flags);
	for (order = 0; order < NR_MEM_LISTS; order++) {
		struct page *tmp;
		unsigned long nr = 0;

		for (tmp = free_mem_area[order].next;
				tmp != memory_head(free_mem_area + order); tmp = tmp->next) {
			nr++;
		}
		total += nr << order;	/* nr blocks of 2^order pages each */
		ut_printf("%d(%d): count:%d  static count:%d total:%d (%dM)\n", order, 1 << order, nr,
				free_mem_area[order].stat_count, (nr << order), ((nr << order)*PAGE_SIZE)/(1024*1024));
	}
	spin_unlock_irqrestore(&free_area_lock, flags);

	/* BUGFIX: the MB figure was computed as (total * 4) / 1024, hard-coding
	 * a 4KB page; derive it from PAGE_SIZE like every other report here. */
	ut_printf("total Free pages = %d (%dM) Actual pages: %d (%dM) pagecachesize: %dM , freepages:%d\n",
			total, (total * PAGE_SIZE) / (1024 * 1024), g_stat_mem_size / PAGE_SIZE,
			g_stat_mem_size / (1024 * 1024), g_pagecache_size / (1024 * 1024), g_nr_free_pages);

	int slab = 0;
	int referenced = 0;
	int reserved = 0;
	int dma = 0;
	unsigned long va_end = (unsigned long) __va(g_phy_mem_size);

	/* Walk the page_struct array backwards from the last physical page
	 * down to (and including) entry 0, tallying page-flag counts. */
	page_struct_t *p;
	p = g_mem_map + MAP_NR(va_end);
	do {
		--p;
		if (PageReserved(p)) reserved++;
		if (PageDMA(p)) dma++;
		if (PageReferenced(p)) referenced++;
		if (PageSlab(p)) slab++;
	} while (p > g_mem_map);
	ut_printf(" reserved :%d(%dM) referenced:%d dma:%d slab:%d  stat_allocs:%d stat_frees: %d\n\n",
			reserved, (reserved * PAGE_SIZE) / (1024 * 1024), referenced, dma, slab, stat_allocs, stat_frees);

	if ((arg1 != 0) && (ut_strcmp(arg1, "all") == 0))
		Jcmd_jslab(0, 0);
	return 1;
}