Snap for 6948038 from f87d86481db89928018b449eac42c4d534e9add0 to rvc-platform-release
Change-Id: I2f2686d46384041d14d3a1159a1bd3ca4666816b
diff --git a/event.logtags b/event.logtags
index 452f411..fcbb960 100644
--- a/event.logtags
+++ b/event.logtags
@@ -35,4 +35,4 @@
# TODO: generate ".java" and ".h" files with integer constants from this file.
# for killinfo logs
-10195355 killinfo (Pid|1|5),(Uid|1|5),(OomAdj|1),(MinOomAdj|1),(TaskSize|1),(enum kill_reasons|1|5),(MemFree|1),(Cached|1),(SwapCached|1),(Buffers|1),(Shmem|1),(Unevictable|1),(SwapTotal|1),(SwapFree|1),(ActiveAnon|1),(InactiveAnon|1),(ActiveFile|1),(InactiveFile|1),(SReclaimable|1),(SUnreclaim|1),(KernelStack|1),(PageTables|1),(IonHeap|1),(IonHeapPool|1),(CmaFree|1)
+10195355 killinfo (Pid|1|5),(Uid|1|5),(OomAdj|1),(MinOomAdj|1),(TaskSize|1),(enum kill_reasons|1|5),(MemFree|1),(Cached|1),(SwapCached|1),(Buffers|1),(Shmem|1),(Unevictable|1),(SwapTotal|1),(SwapFree|1),(ActiveAnon|1),(InactiveAnon|1),(ActiveFile|1),(InactiveFile|1),(SReclaimable|1),(SUnreclaim|1),(KernelStack|1),(PageTables|1),(IonHeap|1),(IonHeapPool|1),(CmaFree|1),(MsSinceEvent|1),(MsSincePrevWakeup|1),(WakeupsSinceEvent|1),(SkippedWakeups|1)
diff --git a/lmkd.cpp b/lmkd.cpp
index 882ae4a..b065bbf 100644
--- a/lmkd.cpp
+++ b/lmkd.cpp
@@ -98,6 +98,7 @@
#define EIGHT_MEGA (1 << 23)
#define TARGET_UPDATE_MIN_INTERVAL_MS 1000
+#define THRASHING_RESET_INTERVAL_MS 1000
#define NS_PER_MS (NS_PER_SEC / MS_PER_SEC)
#define US_PER_MS (US_PER_SEC / MS_PER_SEC)
@@ -796,8 +797,18 @@
ctrl_data_write_lmk_kill_occurred((pid_t)pid, (uid_t)uid);
mem_st.process_start_time_ns = starttime * (NS_PER_SEC / sysconf(_SC_CLK_TCK));
mem_st.rss_in_bytes = rss_in_pages * PAGE_SIZE;
- stats_write_lmk_kill_occurred_pid(uid, pid, oom_score_adj,
- min_score_adj, 0, &mem_st);
+
+ struct kill_stat kill_st = {
+ .uid = static_cast<int32_t>(uid),
+ .kill_reason = NONE,
+ .oom_score = oom_score_adj,
+ .min_oom_score = min_score_adj,
+ .free_mem_kb = 0,
+ .free_swap_kb = 0,
+ .tasksize = 0,
+
+ };
+ stats_write_lmk_kill_occurred_pid(pid, &kill_st, &mem_st);
}
free(taskname);
@@ -1818,8 +1829,44 @@
return 0;
}
+enum wakeup_reason {
+ Event,
+ Polling
+};
+
+struct wakeup_info {
+ struct timespec wakeup_tm;
+ struct timespec prev_wakeup_tm;
+ struct timespec last_event_tm;
+ int wakeups_since_event;
+ int skipped_wakeups;
+};
+
+/*
+ * After the initial memory pressure event is received lmkd schedules periodic wakeups to check
+ * the memory conditions and kill if needed (polling). This is done because pressure events are
+ * rate-limited and memory conditions can change in between events. Therefore after the initial
+ * event there might be multiple wakeups. This function records the wakeup information such as the
+ * timestamps of the last event and the last wakeup, the number of wakeups since the last event
+ * and how many of those wakeups were skipped (some wakeups are skipped if previously killed
+ * process is still freeing its memory).
+ */
+static void record_wakeup_time(struct timespec *tm, enum wakeup_reason reason,
+ struct wakeup_info *wi) {
+ wi->prev_wakeup_tm = wi->wakeup_tm;
+ wi->wakeup_tm = *tm;
+ if (reason == Event) {
+ wi->last_event_tm = *tm;
+ wi->wakeups_since_event = 0;
+ wi->skipped_wakeups = 0;
+ } else {
+ wi->wakeups_since_event++;
+ }
+}
+
static void killinfo_log(struct proc* procp, int min_oom_score, int tasksize,
- int kill_reason, union meminfo *mi) {
+ int kill_reason, union meminfo *mi,
+ struct wakeup_info *wi, struct timespec *tm) {
/* log process information */
android_log_write_int32(ctx, procp->pid);
android_log_write_int32(ctx, procp->uid);
@@ -1833,6 +1880,12 @@
android_log_write_int32(ctx, (int32_t)min(mi->arr[field_idx] * page_k, INT32_MAX));
}
+ /* log lmkd wakeup information */
+ android_log_write_int32(ctx, (int32_t)get_time_diff_ms(&wi->last_event_tm, tm));
+ android_log_write_int32(ctx, (int32_t)get_time_diff_ms(&wi->prev_wakeup_tm, tm));
+ android_log_write_int32(ctx, wi->wakeups_since_event);
+ android_log_write_int32(ctx, wi->skipped_wakeups);
+
android_log_write_list(ctx, LOG_ID_EVENTS);
android_log_reset(ctx);
}
@@ -1998,8 +2051,9 @@
}
/* Kill one process specified by procp. Returns the size of the process killed */
-static int kill_one_process(struct proc* procp, int min_oom_score, int kill_reason,
- const char *kill_desc, union meminfo *mi, struct timespec *tm) {
+static int kill_one_process(struct proc* procp, int min_oom_score, enum kill_reasons kill_reason,
+ const char *kill_desc, union meminfo *mi, struct wakeup_info *wi,
+ struct timespec *tm) {
int pid = procp->pid;
int pidfd = procp->pidfd;
uid_t uid = procp->uid;
@@ -2010,6 +2064,7 @@
int result = -1;
struct memory_stat *mem_st;
char buf[LINE_MAX];
+ struct kill_stat kill_st;
tgid = proc_get_tgid(pid);
if (tgid >= 0 && tgid != pid) {
@@ -2055,7 +2110,7 @@
inc_killcnt(procp->oomadj);
- killinfo_log(procp, min_oom_score, tasksize, kill_reason, mi);
+ killinfo_log(procp, min_oom_score, tasksize, kill_reason, mi, wi, tm);
if (kill_desc) {
ALOGI("Kill '%s' (%d), uid %d, oom_adj %d to free %ldkB; reason: %s", taskname, pid,
@@ -2065,7 +2120,15 @@
uid, procp->oomadj, tasksize * page_k);
}
- stats_write_lmk_kill_occurred(uid, taskname, procp->oomadj, min_oom_score, tasksize, mem_st);
+ kill_st.uid = static_cast<int32_t>(uid);
+ kill_st.taskname = taskname;
+ kill_st.kill_reason = kill_reason;
+ kill_st.oom_score = procp->oomadj;
+ kill_st.min_oom_score = min_oom_score;
+ kill_st.free_mem_kb = mi->field.nr_free_pages * page_k;
+ kill_st.free_swap_kb = mi->field.free_swap * page_k;
+ kill_st.tasksize = tasksize;
+ stats_write_lmk_kill_occurred(&kill_st, mem_st);
ctrl_data_write_lmk_kill_occurred((pid_t)pid, uid);
@@ -2084,8 +2147,9 @@
* Find one process to kill at or above the given oom_adj level.
* Returns size of the killed process.
*/
-static int find_and_kill_process(int min_score_adj, int kill_reason, const char *kill_desc,
- union meminfo *mi, struct timespec *tm) {
+static int find_and_kill_process(int min_score_adj, enum kill_reasons kill_reason,
+ const char *kill_desc, union meminfo *mi,
+ struct wakeup_info *wi, struct timespec *tm) {
int i;
int killed_size = 0;
bool lmk_state_change_start = false;
@@ -2100,7 +2164,8 @@
if (!procp)
break;
- killed_size = kill_one_process(procp, min_score_adj, kill_reason, kill_desc, mi, tm);
+ killed_size = kill_one_process(procp, min_score_adj, kill_reason, kill_desc,
+ mi, wi, tm);
if (killed_size >= 0) {
if (!lmk_state_change_start) {
lmk_state_change_start = true;
@@ -2231,31 +2296,24 @@
}
static void mp_event_psi(int data, uint32_t events, struct polling_params *poll_params) {
- enum kill_reasons {
- NONE = -1, /* To denote no kill condition */
- PRESSURE_AFTER_KILL = 0,
- NOT_RESPONDING,
- LOW_SWAP_AND_THRASHING,
- LOW_MEM_AND_SWAP,
- LOW_MEM_AND_THRASHING,
- DIRECT_RECL_AND_THRASHING,
- KILL_REASON_COUNT
- };
enum reclaim_state {
NO_RECLAIM = 0,
KSWAPD_RECLAIM,
DIRECT_RECLAIM,
};
static int64_t init_ws_refault;
+ static int64_t prev_workingset_refault;
static int64_t base_file_lru;
static int64_t init_pgscan_kswapd;
static int64_t init_pgscan_direct;
static int64_t swap_low_threshold;
static bool killing;
- static int thrashing_limit;
- static bool in_reclaim;
+ static int thrashing_limit = thrashing_limit_pct;
static struct zone_watermarks watermarks;
static struct timespec wmark_update_tm;
+ static struct wakeup_info wi;
+ static struct timespec thrashing_reset_tm;
+ static int64_t prev_thrash_growth = 0;
union meminfo mi;
union vmstat vs;
@@ -2270,16 +2328,20 @@
char kill_desc[LINE_MAX];
bool cut_thrashing_limit = false;
int min_score_adj = 0;
+ long since_thrashing_reset_ms;
if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
ALOGE("Failed to get current time");
return;
}
+ record_wakeup_time(&curr_tm, events ? Event : Polling, &wi);
+
bool kill_pending = is_kill_pending();
if (kill_pending && (kill_timeout_ms == 0 ||
get_time_diff_ms(&last_kill_tm, &curr_tm) < static_cast<long>(kill_timeout_ms))) {
/* Skip while still killing a process */
+ wi.skipped_wakeups++;
goto no_kill;
}
/*
@@ -2305,6 +2367,8 @@
/* Reset file-backed pagecache size and refault amounts after a kill */
base_file_lru = vs.field.nr_inactive_file + vs.field.nr_active_file;
init_ws_refault = vs.field.workingset_refault;
+ thrashing_reset_tm = curr_tm;
+ prev_thrash_growth = 0;
}
/* Check free swap levels */
@@ -2323,22 +2387,51 @@
} else if (vs.field.pgscan_kswapd > init_pgscan_kswapd) {
init_pgscan_kswapd = vs.field.pgscan_kswapd;
reclaim = KSWAPD_RECLAIM;
- } else {
- in_reclaim = false;
- /* Skip if system is not reclaiming */
+ } else if (vs.field.workingset_refault == prev_workingset_refault) {
+        /* Device is not thrashing and not reclaiming; bail out early until these stats change */
goto no_kill;
}
- if (!in_reclaim) {
- /* Record file-backed pagecache size when entering reclaim cycle */
+ prev_workingset_refault = vs.field.workingset_refault;
+
+ /*
+ * It's possible we fail to find an eligible process to kill (ex. no process is
+ * above oom_adj_min). When this happens, we should retry to find a new process
+ * for a kill whenever a new eligible process is available. This is especially
+ * important for a slow growing refault case. While retrying, we should keep
+ * monitoring new thrashing counter as someone could release the memory to mitigate
+ * the thrashing. Thus, when thrashing reset window comes, we decay the prev thrashing
+ * counter by window counts. if the counter is still greater than thrashing limit,
+ * we preserve the current prev_thrash counter so we will retry kill again. Otherwise,
+ * we reset the prev_thrash counter so we will stop retrying.
+ */
+ since_thrashing_reset_ms = get_time_diff_ms(&thrashing_reset_tm, &curr_tm);
+ if (since_thrashing_reset_ms > THRASHING_RESET_INTERVAL_MS) {
+ long windows_passed;
+ /* Calculate prev_thrash_growth if we crossed THRASHING_RESET_INTERVAL_MS */
+ prev_thrash_growth = (vs.field.workingset_refault - init_ws_refault) * 100
+ / (base_file_lru + 1);
+ windows_passed = (since_thrashing_reset_ms / THRASHING_RESET_INTERVAL_MS);
+ /*
+ * Decay prev_thrashing unless over-the-limit thrashing was registered in the window we
+ * just crossed, which means there were no eligible processes to kill. We preserve the
+ * counter in that case to ensure a kill if a new eligible process appears.
+ */
+ if (windows_passed > 1 || prev_thrash_growth < thrashing_limit) {
+ prev_thrash_growth >>= windows_passed;
+ }
+
+ /* Record file-backed pagecache size when crossing THRASHING_RESET_INTERVAL_MS */
base_file_lru = vs.field.nr_inactive_file + vs.field.nr_active_file;
init_ws_refault = vs.field.workingset_refault;
+ thrashing_reset_tm = curr_tm;
thrashing_limit = thrashing_limit_pct;
} else {
/* Calculate what % of the file-backed pagecache refaulted so far */
- thrashing = (vs.field.workingset_refault - init_ws_refault) * 100 / base_file_lru;
+ thrashing = (vs.field.workingset_refault - init_ws_refault) * 100 / (base_file_lru + 1);
}
- in_reclaim = true;
+ /* Add previous cycle's decayed thrashing amount */
+ thrashing += prev_thrash_growth;
/*
* Refresh watermarks once per min in case user updated one of the margins.
@@ -2355,7 +2448,7 @@
calc_zone_watermarks(&zi, &watermarks);
wmark_update_tm = curr_tm;
- }
+ }
/* Find out which watermark is breached if any */
wmark = get_lowest_watermark(&mi, &watermarks);
@@ -2421,7 +2514,7 @@
/* Kill a process if necessary */
if (kill_reason != NONE) {
int pages_freed = find_and_kill_process(min_score_adj, kill_reason, kill_desc, &mi,
- &curr_tm);
+ &wi, &curr_tm);
if (pages_freed > 0) {
killing = true;
if (cut_thrashing_limit) {
@@ -2482,6 +2575,7 @@
.filename = MEMCG_MEMORYSW_USAGE,
.fd = -1,
};
+ static struct wakeup_info wi;
if (debug_process_killing) {
ALOGI("%s memory pressure event is triggered", level_name[level]);
@@ -2517,6 +2611,8 @@
return;
}
+ record_wakeup_time(&curr_tm, events ? Event : Polling, &wi);
+
if (kill_timeout_ms &&
get_time_diff_ms(&last_kill_tm, &curr_tm) < static_cast<long>(kill_timeout_ms)) {
/*
@@ -2525,6 +2621,7 @@
*/
if (is_kill_pending()) {
kill_skip_count++;
+ wi.skipped_wakeups++;
return;
}
/*
@@ -2636,7 +2733,7 @@
do_kill:
if (low_ram_device) {
/* For Go devices kill only one task */
- if (find_and_kill_process(level_oomadj[level], -1, NULL, &mi, &curr_tm) == 0) {
+ if (find_and_kill_process(level_oomadj[level], NONE, NULL, &mi, &wi, &curr_tm) == 0) {
if (debug_process_killing) {
ALOGI("Nothing to kill");
}
@@ -2659,7 +2756,7 @@
min_score_adj = level_oomadj[level];
}
- pages_freed = find_and_kill_process(min_score_adj, -1, NULL, &mi, &curr_tm);
+ pages_freed = find_and_kill_process(min_score_adj, NONE, NULL, &mi, &wi, &curr_tm);
if (pages_freed == 0) {
/* Rate limit kill reports when nothing was reclaimed */
@@ -3000,6 +3097,8 @@
static void resume_polling(struct polling_params *poll_params, struct timespec curr_tm) {
poll_params->poll_start_tm = curr_tm;
poll_params->poll_handler = poll_params->paused_handler;
+ poll_params->polling_interval_ms = PSI_POLL_PERIOD_SHORT_MS;
+ poll_params->paused_handler = NULL;
}
static void call_handler(struct event_handler_info* handler_info,
@@ -3034,7 +3133,6 @@
if (get_time_diff_ms(&poll_params->poll_start_tm, &curr_tm) > PSI_WINDOW_SIZE_MS) {
/* Polled for the duration of PSI window, time to stop */
poll_params->poll_handler = NULL;
- poll_params->paused_handler = NULL;
}
break;
}
@@ -3059,12 +3157,8 @@
bool poll_now;
clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
- if (poll_params.poll_handler == poll_params.paused_handler) {
- /*
- * Just transitioned into POLLING_RESUME. Reset paused_handler
- * and poll immediately
- */
- poll_params.paused_handler = NULL;
+ if (poll_params.update == POLLING_RESUME) {
+ /* Just transitioned into POLLING_RESUME, poll immediately. */
poll_now = true;
nevents = 0;
} else {
@@ -3095,6 +3189,7 @@
stop_wait_for_proc_kill(false);
if (polling_paused(&poll_params)) {
clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
+ poll_params.update = POLLING_RESUME;
resume_polling(&poll_params, curr_tm);
}
}
@@ -3196,7 +3291,7 @@
property_get_bool("ro.lmk.kill_heaviest_task", false);
low_ram_device = property_get_bool("ro.config.low_ram", false);
kill_timeout_ms =
- (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
+ (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 100);
use_minfree_levels =
property_get_bool("ro.lmk.use_minfree_levels", false);
per_app_memcg =
diff --git a/statslog.cpp b/statslog.cpp
index 8fb441c..a9606f9 100644
--- a/statslog.cpp
+++ b/statslog.cpp
@@ -69,41 +69,61 @@
return procp;
}
+inline int32_t map_kill_reason(enum kill_reasons reason) {
+ switch (reason) {
+ case PRESSURE_AFTER_KILL:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__PRESSURE_AFTER_KILL;
+ case NOT_RESPONDING:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__NOT_RESPONDING;
+ case LOW_SWAP_AND_THRASHING:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__LOW_SWAP_AND_THRASHING;
+ case LOW_MEM_AND_SWAP:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__LOW_MEM_AND_SWAP;
+ case LOW_MEM_AND_THRASHING:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__LOW_MEM_AND_THRASHING;
+ case DIRECT_RECL_AND_THRASHING:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__DIRECT_RECL_AND_THRASHING;
+ case LOW_MEM_AND_SWAP_UTIL:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__LOW_MEM_AND_SWAP_UTIL;
+ default:
+ return android::lmkd::stats::LMK_KILL_OCCURRED__REASON__UNKNOWN;
+ }
+}
+
/**
* Logs the event when LMKD kills a process to reduce memory pressure.
* Code: LMK_KILL_OCCURRED = 51
*/
-int
-stats_write_lmk_kill_occurred(int32_t uid, char const* process_name,
- int32_t oom_score, int32_t min_oom_score, int tasksize,
- struct memory_stat *mem_st) {
+int stats_write_lmk_kill_occurred(struct kill_stat *kill_st, struct memory_stat *mem_st) {
if (enable_stats_log) {
return android::lmkd::stats::stats_write(
android::lmkd::stats::LMK_KILL_OCCURRED,
- uid,
- process_name,
- oom_score,
+ kill_st->uid,
+ kill_st->taskname,
+ kill_st->oom_score,
mem_st ? mem_st->pgfault : -1,
mem_st ? mem_st->pgmajfault : -1,
- mem_st ? mem_st->rss_in_bytes : tasksize * BYTES_IN_KILOBYTE,
+ mem_st ? mem_st->rss_in_bytes : kill_st->tasksize * BYTES_IN_KILOBYTE,
mem_st ? mem_st->cache_in_bytes : -1,
mem_st ? mem_st->swap_in_bytes : -1,
mem_st ? mem_st->process_start_time_ns : -1,
- min_oom_score
+ kill_st->min_oom_score,
+ kill_st->free_mem_kb,
+ kill_st->free_swap_kb,
+ map_kill_reason(kill_st->kill_reason)
);
} else {
return -EINVAL;
}
}
-int stats_write_lmk_kill_occurred_pid(int32_t uid, int pid, int32_t oom_score,
- int32_t min_oom_score, int tasksize,
+int stats_write_lmk_kill_occurred_pid(int pid, struct kill_stat *kill_st,
struct memory_stat* mem_st) {
struct proc* proc = pid_lookup(pid);
if (!proc) return -EINVAL;
- return stats_write_lmk_kill_occurred(uid, proc->taskname, oom_score, min_oom_score,
- tasksize, mem_st);
+ kill_st->taskname = proc->taskname;
+ return stats_write_lmk_kill_occurred(kill_st, mem_st);
}
static void memory_stat_parse_line(char* line, struct memory_stat* mem_st) {
diff --git a/statslog.h b/statslog.h
index 9cba6b2..6e59ef2 100644
--- a/statslog.h
+++ b/statslog.h
@@ -37,6 +37,30 @@
int64_t process_start_time_ns;
};
+// If you update this, also update the corresponding stats enum mapping.
+enum kill_reasons {
+ NONE = -1, /* To denote no kill condition */
+ PRESSURE_AFTER_KILL = 0,
+ NOT_RESPONDING,
+ LOW_SWAP_AND_THRASHING,
+ LOW_MEM_AND_SWAP,
+ LOW_MEM_AND_THRASHING,
+ DIRECT_RECL_AND_THRASHING,
+ LOW_MEM_AND_SWAP_UTIL,
+ KILL_REASON_COUNT
+};
+
+struct kill_stat {
+ int32_t uid;
+ char *taskname;
+ enum kill_reasons kill_reason;
+ int32_t oom_score;
+ int32_t min_oom_score;
+ int64_t free_mem_kb;
+ int64_t free_swap_kb;
+ int tasksize;
+};
+
#ifdef LMKD_LOG_STATS
#define MEMCG_PROCESS_MEMORY_STAT_PATH "/dev/memcg/apps/uid_%u/pid_%u/memory.stat"
@@ -56,17 +80,13 @@
* Logs the event when LMKD kills a process to reduce memory pressure.
* Code: LMK_KILL_OCCURRED = 51
*/
-int
-stats_write_lmk_kill_occurred(int32_t uid, char const* process_name,
- int32_t oom_score, int32_t min_oom_score,
- int tasksize, struct memory_stat *mem_st);
+int stats_write_lmk_kill_occurred(struct kill_stat *kill_st, struct memory_stat *mem_st);
/**
* Logs the event when LMKD kills a process to reduce memory pressure.
* Code: LMK_KILL_OCCURRED = 51
*/
-int stats_write_lmk_kill_occurred_pid(int32_t uid, int pid, int32_t oom_score,
- int32_t min_oom_score, int tasksize,
+int stats_write_lmk_kill_occurred_pid(int pid, struct kill_stat *kill_st,
struct memory_stat* mem_st);
struct memory_stat *stats_read_memory_stat(bool per_app_memcg, int pid, uid_t uid);
@@ -92,16 +112,13 @@
stats_write_lmk_state_changed(int32_t state __unused) { return -EINVAL; }
static inline int
-stats_write_lmk_kill_occurred(int32_t uid __unused,
- char const* process_name __unused, int32_t oom_score __unused,
- int32_t min_oom_score __unused, int tasksize __unused,
- struct memory_stat *mem_st __unused) { return -EINVAL; }
+stats_write_lmk_kill_occurred(struct kill_stat *kill_st __unused,
+ struct memory_stat *mem_st __unused) {
+ return -EINVAL;
+}
-static inline int stats_write_lmk_kill_occurred_pid(int32_t uid __unused,
- int pid __unused, int32_t oom_score __unused,
- int32_t min_oom_score __unused,
- int tasksize __unused,
- struct memory_stat* mem_st __unused) {
+static inline int stats_write_lmk_kill_occurred_pid(int pid __unused,
+                                                    struct kill_stat *kill_st __unused,
+                                                    struct memory_stat* mem_st __unused) {
return -EINVAL;
}