static void mlog_meminfo(void)
{
	unsigned long memfree;
	unsigned long swapfree;
	unsigned long cached;
	unsigned int gpuuse = 0;
	unsigned int gpu_page_cache = 0;
	unsigned long mlock;
	unsigned long zram;
	unsigned long active, inactive;
	unsigned long shmem;

	/* Free memory, including pages currently reserved by MTKPASR */
	memfree = P2K(global_page_state(NR_FREE_PAGES) + mtkpasr_show_page_reserved());

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
	swapfree = P2K(get_nr_swap_pages());
	cached = P2K(global_page_state(NR_FILE_PAGES) - total_swapcache_pages());
#else
	/* Before 3.10, total_swapcache_pages was a variable, not a function */
	swapfree = P2K(get_nr_swap_pages());
	cached = P2K(global_page_state(NR_FILE_PAGES) - total_swapcache_pages);
#endif

#ifdef COLLECT_GPU_MEMINFO
	if (mtk_get_gpu_memory_usage(&gpuuse))
		gpuuse = B2K(gpuuse);
	if (mtk_get_gpu_page_cache(&gpu_page_cache))
		gpu_page_cache = B2K(gpu_page_cache);
#endif

	mlock = P2K(global_page_state(NR_MLOCK));

#if defined(CONFIG_ZRAM) && defined(CONFIG_ZSMALLOC)
	/* Size of the zsmalloc pool backing the zram device, if initialized */
	zram = (zram_devices && zram_devices->init_done && zram_devices->meta) ?
		B2K(zs_get_total_size_bytes(zram_devices->meta->mem_pool)) : 0;
#else
	zram = 0;
#endif

	active = P2K(global_page_state(NR_ACTIVE_ANON) + global_page_state(NR_ACTIVE_FILE));
	inactive = P2K(global_page_state(NR_INACTIVE_ANON) + global_page_state(NR_INACTIVE_FILE));
	/* MLOG_PRINTK("active: %lu, inactive: %lu\n", active, inactive); */
	shmem = P2K(global_page_state(NR_SHMEM));

	spin_lock_bh(&mlogbuf_lock);
	mlog_emit_32(memfree);
	mlog_emit_32(swapfree);
	mlog_emit_32(cached);
	mlog_emit_32(gpuuse);
	mlog_emit_32(gpu_page_cache);
	mlog_emit_32(mlock);
	mlog_emit_32(zram);
	mlog_emit_32(active);
	mlog_emit_32(inactive);
	mlog_emit_32(shmem);
	spin_unlock_bh(&mlogbuf_lock);
}
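/*
 * mlog_meminfo() records everything in KB. A minimal sketch of the unit
 * conversion helpers it relies on; the names match the calls above, but
 * these definitions are assumptions, not taken from the original file.
 * P2K converts a page count to KB, B2K converts a byte count to KB.
 */
#define P2K(x) ((x) << (PAGE_SHIFT - 10))	/* pages -> KB */
#define B2K(x) ((x) >> 10)			/* bytes -> KB */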
/* Show overall executing status */
static ssize_t execstate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct mtkpasr *mtkpasr = dev_to_mtkpasr(dev);
	u64 val = 0;
	int len = 0, tmp;

	/* Compression status */
	tmp = sprintf(buf, "Good compress [%u] : Bad compress [%u]\n",
		      mtkpasr->stats.good_compress, mtkpasr->stats.bad_compress);
	buf += tmp;
	len += tmp;

	/* Debug level */
	tmp = sprintf(buf, "%d\n", mtkpasr_debug_level);
	buf += tmp;
	len += tmp;

	/* Enable status */
	tmp = sprintf(buf, "%d [%d]\n", mtkpasr_enable, mtkpasr_enable_sr);
	buf += tmp;
	len += tmp;

	/* Available size for external compression */
	val = (u64)(mtkpasr_acquire_total() - mtkpasr_acquire_frees());
	tmp = sprintf(buf, "%llu\n", val);
	buf += tmp;
	len += tmp;

	/* Bank/Rank information */
	tmp = mtkpasr_show_banks(buf);
	buf += tmp;
	len += tmp;

	/* MTKPASR status */
	tmp = sprintf(buf, "Enter [%lu]times - Fail [%lu]times :: Last Success - SR-OFF[0x%x] DPD[0x%x]\n",
		      mtkpasr_triggered, failed_mtkpasr, mtkpasr_sroff, mtkpasr_dpd);
	buf += tmp;
	len += tmp;

	/* Pages reserved by MTKPASR */
	tmp = sprintf(buf, "Reserved pages [%lu]\n", mtkpasr_show_page_reserved());
	buf += tmp;
	len += tmp;

	/* SR mask */
	tmp = sprintf(buf, "%lu\n", mtkpasr_control);
	buf += tmp;
	len += tmp;

	return len;
}
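/*
 * A sketch of how execstate_show() would typically be exposed through
 * sysfs. DEVICE_ATTR() is the standard kernel macro for this; the
 * attribute name "execstate" is an assumption here. Reading the file
 * (e.g. via cat) invokes the show callback once with a PAGE_SIZE
 * buffer, which is why the chained sprintf/len pattern above is safe
 * as long as the total output stays under one page.
 */
static DEVICE_ATTR(execstate, S_IRUGO, execstate_show, NULL);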
/* static void mlog_buddyinfo(void) */
void mlog_buddyinfo(void)
{
	int i;
	struct zone *zone;
	struct zone *node_zones;
	unsigned int order;
	int zone_nr = 0;
	unsigned long normal_nr_free[MAX_ORDER] = { 0 };
	unsigned long high_nr_free[MAX_ORDER] = { 0 };

	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long flags;

		node_zones = pgdat->node_zones;
		/* MAX_NR_ZONES is 3 on this platform */
		for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
			if (!populated_zone(zone))
				continue;
			spin_lock_irqsave(&zone->lock, flags);
			zone_nr++;
			for (order = 0; order < MAX_ORDER; ++order) {
				if (zone_nr == 1)
					normal_nr_free[order] = zone->free_area[order].nr_free;
				if (zone_nr == 2)
					high_nr_free[order] = zone->free_area[order].nr_free;
			}
			spin_unlock_irqrestore(&zone->lock, flags);
		}
	}

	if (zone_nr == 1) {
		for (order = 0; order < MAX_ORDER; ++order)
			high_nr_free[order] = 0;
	}

#ifdef CONFIG_MTKPASR
	/* Credit pages reserved by MTKPASR back as highest-order blocks */
	if (zone_nr == 2)
		high_nr_free[MAX_ORDER - 1] += (mtkpasr_show_page_reserved() >> (MAX_ORDER - 1));
#endif

	/*
	 * Emit the per-order free counts. This tail is a minimal sketch,
	 * assuming the same mlogbuf emit pattern as mlog_meminfo() above.
	 */
	spin_lock_bh(&mlogbuf_lock);
	for (order = 0; order < MAX_ORDER; ++order)
		mlog_emit_32(normal_nr_free[order]);
	for (order = 0; order < MAX_ORDER; ++order)
		mlog_emit_32(high_nr_free[order]);
	spin_unlock_bh(&mlogbuf_lock);
}
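/*
 * Each free_area[order].nr_free counts blocks of 2^order pages, so the
 * shift in the CONFIG_MTKPASR branch above converts reserved pages into
 * MAX_ORDER-1 blocks. A small hypothetical helper (not in the original)
 * showing the inverse arithmetic: summing per-order block counts back
 * into total free pages for one zone.
 */
static unsigned long buddy_total_free_pages(const unsigned long nr_free[MAX_ORDER])
{
	unsigned int order;
	unsigned long pages = 0;

	for (order = 0; order < MAX_ORDER; ++order)
		pages += nr_free[order] << order;	/* blocks * 2^order pages */
	return pages;
}
/* e.g. buddy_total_free_pages(normal_nr_free) after the scan above */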
static ssize_t page_reserved_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Reserved pages [%lu]\n", mtkpasr_show_page_reserved());
}
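/*
 * A sketch of publishing both show callbacks as one sysfs group, using
 * the standard attribute_group/sysfs_create_group API. The attribute
 * names, the array/group identifiers, and the probe-time call site are
 * assumptions, not taken from the original driver.
 */
static DEVICE_ATTR(page_reserved, S_IRUGO, page_reserved_show, NULL);

static struct attribute *mtkpasr_attrs[] = {
	&dev_attr_execstate.attr,
	&dev_attr_page_reserved.attr,
	NULL,
};

static const struct attribute_group mtkpasr_attr_group = {
	.attrs = mtkpasr_attrs,
};

/* e.g. in probe: ret = sysfs_create_group(&dev->kobj, &mtkpasr_attr_group); */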