From ceffaf2a37ffca83c84d4ef624d71742cba3e1a0 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 4 Jan 2018 08:54:53 -0800 Subject: [PATCH 1/5] lmkd: Close cgroup.event_control file when done writing After events are specified by writing into cgroup.event_control file the file should be closed. Change-Id: Id015e6a7bac2b74bbc8d8793c85f529ee00bdf55 Signed-off-by: Suren Baghdasaryan --- lmkd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lmkd.c b/lmkd.c index 2bb3623..6def5f4 100644 --- a/lmkd.c +++ b/lmkd.c @@ -860,6 +860,7 @@ static bool init_mp_common(void *event_handler, enum vmpressure_level level) { } maxevents++; mpevfd[level] = evfd; + close(evctlfd); return true; err: From 3e1a849c2538cb1d11e85ef30a97bf3d9cb35ce5 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 4 Jan 2018 09:16:21 -0800 Subject: [PATCH 2/5] lmkd: Detect the highest level of vmpressure when event is detected lmkd checks for vmpressure events using epoll_wait() with eventfds of all registered events. It's possible that multiple events of different priorities happen before epoll_wait() returns. For these cases we use conservative approach by assuming that the system is under the highest registered vmpressure levels. This speeds up lmkd response time to high memory pressure by not responding to possibly stale low pressure levels when vmpressure rises quickly. 
Bug: 63631020 Test: alloc-stress Change-Id: I79a85c3342e7e1b3a3be82945266b2cc60b437cf Signed-off-by: Suren Baghdasaryan --- lmkd.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/lmkd.c b/lmkd.c index 6def5f4..1b8fed3 100644 --- a/lmkd.c +++ b/lmkd.c @@ -102,7 +102,7 @@ static const char *level_name[] = { }; static int level_oomadj[VMPRESS_LEVEL_COUNT]; -static int mpevfd[VMPRESS_LEVEL_COUNT]; +static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 }; static bool debug_process_killing; static bool enable_pressure_upgrade; static int64_t upgrade_pressure; @@ -745,11 +745,20 @@ static void mp_event_common(enum vmpressure_level level) { unsigned long long evcount; int64_t mem_usage, memsw_usage; int64_t mem_pressure; + enum vmpressure_level lvl; - ret = read(mpevfd[level], &evcount, sizeof(evcount)); - if (ret < 0) - ALOGE("Error reading memory pressure event fd; errno=%d", - errno); + /* + * Check all event counters from low to critical + * and upgrade to the highest priority one. By reading + * eventfd we also reset the event counters. + */ + for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) { + if (mpevfd[lvl] != -1 && + read(mpevfd[lvl], &evcount, sizeof(evcount)) > 0 && + evcount > 0 && lvl > level) { + level = lvl; + } + } mem_usage = get_memory_usage(MEMCG_MEMORY_USAGE); memsw_usage = get_memory_usage(MEMCG_MEMORYSW_USAGE); From 94ccd722eb9cb1e4d0e2827594a68cd5e44589ed Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 17 Jan 2018 17:17:44 -0800 Subject: [PATCH 3/5] lmkd: Allow killing multiple processes to downgrade memory pressure Record free memory at low vmpressure levels and whenever pressure increases beyond low free up enough memory to downgrade memory pressure to low. This is done by freeing enough memory to get to the max free memory levels seen during low vmpressure. The kill logic for Go devices is not changed as these devices are designed to operate under high memory pressure. 
Bug: 63631020 Test: alloc-stress Change-Id: Ic8396eee08013b1c709072a13525601d5c8bf1f1 Signed-off-by: Suren Baghdasaryan --- lmkd.c | 137 +++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 114 insertions(+), 23 deletions(-) diff --git a/lmkd.c b/lmkd.c index 1b8fed3..ece4dff 100644 --- a/lmkd.c +++ b/lmkd.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -101,6 +102,16 @@ static const char *level_name[] = { "critical" }; +struct mem_size { + int free_mem; + int free_swap; +}; + +struct { + int min_free; /* recorded but not used yet */ + int max_free; +} low_pressure_mem = { -1, -1 }; + static int level_oomadj[VMPRESS_LEVEL_COUNT]; static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 }; static bool debug_process_killing; @@ -559,6 +570,18 @@ static int zoneinfo_parse(struct sysmeminfo *mip) { return 0; } +static int get_free_memory(struct mem_size *ms) { + struct sysinfo si; + + if (sysinfo(&si) < 0) + return -1; + + ms->free_mem = (int)(si.freeram * si.mem_unit / PAGE_SIZE); + ms->free_swap = (int)(si.freeswap * si.mem_unit / PAGE_SIZE); + + return 0; +} + static int proc_get_size(int pid) { char path[PATH_MAX]; char line[LINE_MAX]; @@ -676,34 +699,40 @@ static int kill_one_process(struct proc* procp, int min_score_adj, } /* - * Find a process to kill based on the current (possibly estimated) free memory - * and cached memory sizes. Returns the size of the killed processes. + * Find processes to kill to free required number of pages. + * If pages_to_free is set to 0 only one process will be killed. + * Returns the size of the killed processes. 
*/ -static int find_and_kill_process(enum vmpressure_level level) { +static int find_and_kill_processes(enum vmpressure_level level, + int pages_to_free) { int i; - int killed_size = 0; + int killed_size; + int pages_freed = 0; int min_score_adj = level_oomadj[level]; for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) { struct proc *procp; -retry: - if (kill_heaviest_task) - procp = proc_get_heaviest(i); - else - procp = proc_adj_lru(i); + while (true) { + if (is_go_device) + procp = proc_adj_lru(i); + else + procp = proc_get_heaviest(i); + + if (!procp) + break; - if (procp) { killed_size = kill_one_process(procp, min_score_adj, level); - if (killed_size < 0) { - goto retry; - } else { - return killed_size; + if (killed_size >= 0) { + pages_freed += killed_size; + if (pages_freed >= pages_to_free) { + return pages_freed; + } } } } - return 0; + return pages_freed; } static int64_t get_memory_usage(const char* path) { @@ -730,6 +759,32 @@ static int64_t get_memory_usage(const char* path) { return mem_usage; } +void record_low_pressure_levels(struct mem_size *free_mem) { + if (low_pressure_mem.min_free == -1 || + low_pressure_mem.min_free > free_mem->free_mem) { + if (debug_process_killing) { + ALOGI("Low pressure min memory update from %d to %d", + low_pressure_mem.min_free, free_mem->free_mem); + } + low_pressure_mem.min_free = free_mem->free_mem; + } + /* + * Free memory at low vmpressure events occasionally gets spikes, + * possibly a stale low vmpressure event with memory already + * freed up (no memory pressure should have been reported). + * Ignore large jumps in max_free that would mess up our stats. 
+ */ + if (low_pressure_mem.max_free == -1 || + (low_pressure_mem.max_free < free_mem->free_mem && + free_mem->free_mem - low_pressure_mem.max_free < low_pressure_mem.max_free * 0.1)) { + if (debug_process_killing) { + ALOGI("Low pressure max memory update from %d to %d", + low_pressure_mem.max_free, free_mem->free_mem); + } + low_pressure_mem.max_free = free_mem->free_mem; + } +} + enum vmpressure_level upgrade_level(enum vmpressure_level level) { return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ? level + 1 : level); @@ -746,6 +801,7 @@ static void mp_event_common(enum vmpressure_level level) { int64_t mem_usage, memsw_usage; int64_t mem_pressure; enum vmpressure_level lvl; + struct mem_size free_mem; /* * Check all event counters from low to critical @@ -760,6 +816,20 @@ static void mp_event_common(enum vmpressure_level level) { } } + if (get_free_memory(&free_mem) == 0) { + if (level == VMPRESS_LEVEL_LOW) { + record_low_pressure_levels(&free_mem); + } + } else { + ALOGE("Failed to get free memory!"); + return; + } + + if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) { + /* Do not monitor this pressure level */ + return; + } + mem_usage = get_memory_usage(MEMCG_MEMORY_USAGE); memsw_usage = get_memory_usage(MEMCG_MEMORYSW_USAGE); if (memsw_usage < 0 || mem_usage < 0) { @@ -796,9 +866,35 @@ static void mp_event_common(enum vmpressure_level level) { } do_kill: - if (find_and_kill_process(level) == 0) { - if (debug_process_killing) { - ALOGI("Nothing to kill"); + if (is_go_device) { + /* For Go devices kill only one task */ + if (find_and_kill_processes(level, 0) == 0) { + if (debug_process_killing) { + ALOGI("Nothing to kill"); + } + } + } else { + /* If pressure level is less than critical and enough free swap then ignore */ + if (level < VMPRESS_LEVEL_CRITICAL && free_mem.free_swap > low_pressure_mem.max_free) { + if (debug_process_killing) { + ALOGI("Ignoring pressure since %d swap pages are available ", free_mem.free_swap); + } + return; + } + + /* 
Free up enough memory to downgrade the memory pressure to low level */ + if (free_mem.free_mem < low_pressure_mem.max_free) { + int pages_to_free = low_pressure_mem.max_free - free_mem.free_mem; + if (debug_process_killing) { + ALOGI("Trying to free %d pages", pages_to_free); + } + int pages_freed = find_and_kill_processes(level, pages_to_free); + if (pages_freed < pages_to_free) { + if (debug_process_killing) { + ALOGI("Unable to free enough memory (pages freed=%d)", + pages_freed); + } + } } } @@ -824,11 +920,6 @@ static bool init_mp_common(void *event_handler, enum vmpressure_level level) { int ret; const char *levelstr = level_name[level]; - if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) { - ALOGI("%s pressure events are disabled", levelstr); - return true; - } - mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC); if (mpfd < 0) { ALOGI("No kernel memory.pressure_level support (errno=%d)", errno); From 30854e70d994c33130247126665f996f6f6fd6b4 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 17 Jan 2018 17:28:01 -0800 Subject: [PATCH 4/5] lmkd: Implement kill timeout New ro.lmk.kill_timeout_ms property defines timeout in ms after a successful kill cycle for more kills to be considered. This is necessary because memory pressure after a kill does not go down instantly and system needs time to reflect new memory state. This timeout prevents extra kills in the period immediately after a kill cycle. By default it is set to 0 which disables this feature. 
Bug: 63631020 Test: alloc-stress Change-Id: Ia847118c8c4a659a7fc38cd5cd0042acb514ae28 Signed-off-by: Suren Baghdasaryan --- lmkd.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/lmkd.c b/lmkd.c index ece4dff..77285cb 100644 --- a/lmkd.c +++ b/lmkd.c @@ -120,6 +120,7 @@ static int64_t upgrade_pressure; static int64_t downgrade_pressure; static bool is_go_device; static bool kill_heaviest_task; +static unsigned long kill_timeout_ms; /* control socket listen and data */ static int ctrl_lfd; @@ -795,6 +796,12 @@ enum vmpressure_level downgrade_level(enum vmpressure_level level) { level - 1 : level); } +static inline unsigned long get_time_diff_ms(struct timeval *from, + struct timeval *to) { + return (to->tv_sec - from->tv_sec) * 1000 + + (to->tv_usec - from->tv_usec) / 1000; +} + static void mp_event_common(enum vmpressure_level level) { int ret; unsigned long long evcount; @@ -802,6 +809,8 @@ static void mp_event_common(enum vmpressure_level level) { int64_t mem_pressure; enum vmpressure_level lvl; struct mem_size free_mem; + static struct timeval last_report_tm; + static unsigned long skip_count = 0; /* * Check all event counters from low to critical @@ -816,6 +825,23 @@ static void mp_event_common(enum vmpressure_level level) { } } + if (kill_timeout_ms) { + struct timeval curr_tm; + gettimeofday(&curr_tm, NULL); + if (get_time_diff_ms(&last_report_tm, &curr_tm) < kill_timeout_ms) { + skip_count++; + return; + } + } + + if (skip_count > 0) { + if (debug_process_killing) { + ALOGI("%lu memory pressure events were skipped after a kill!", + skip_count); + } + skip_count = 0; + } + if (get_free_memory(&free_mem) == 0) { if (level == VMPRESS_LEVEL_LOW) { record_low_pressure_levels(&free_mem); @@ -894,6 +920,8 @@ do_kill: ALOGI("Unable to free enough memory (pages freed=%d)", pages_freed); } + } else { + gettimeofday(&last_report_tm, NULL); } } } @@ -1081,6 +1109,8 @@ int main(int argc __unused, char **argv __unused) { 
kill_heaviest_task = property_get_bool("ro.lmk.kill_heaviest_task", true); is_go_device = property_get_bool("ro.config.low_ram", false); + kill_timeout_ms = + (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0); if (mlockall(MCL_CURRENT | MCL_FUTURE)) ALOGW("mlockall failed: errno=%d", errno); From e6613ea04edc98e8482082a5b0640e38ea796404 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 18 Jan 2018 17:27:30 -0800 Subject: [PATCH 5/5] lmkd: Select in-kernel vs userspace lmk based on kernel driver presence Currently selection criteria for in-kernel vs userspace lmk is kernel driver presence and device not being a Go device. This change removes Go device check leaving kernel driver presence to be the only selection criteria. Bug: 71502948 Change-Id: I394a7920433a8d090e207ea86296356413a63fe7 Signed-off-by: Suren Baghdasaryan --- lmkd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lmkd.c b/lmkd.c index 77285cb..3230310 100644 --- a/lmkd.c +++ b/lmkd.c @@ -1038,7 +1038,7 @@ static int init(void) { maxevents++; has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK); - use_inkernel_interface = has_inkernel_module && !is_go_device; + use_inkernel_interface = has_inkernel_module; if (use_inkernel_interface) { ALOGI("Using in-kernel low memory killer interface");