ANDROID: Incremental fs: wake up log pollers less often

Waking up the waiters accounts for 80+% of the total logging
time, and for about 40% of the overall read_single_page() time
when no signature verification is done. Throttling the wakeup
to once every 16ms wins back all of the read performance,
reduces the waiters' CPU usage, and still leaves them enough
time to pull the logs out.

Bug: 155996534
Test: adb install megacity.apk & dd from the installed apk

Signed-off-by: Yurii Zubrytskyi <zyy@google.com>
Signed-off-by: Paul Lawrence <paullawrence@google.com>
Change-Id: I4a118dc226d7ca318cf099ba3e239f0120bb23c2
Author: Yurii Zubrytskyi <zyy@google.com>, 2020-05-13 17:04:44 -07:00 (committed by Paul Lawrence)
parent c92446c89f
commit 66e2580207
2 changed files with 27 additions and 3 deletions
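To make the throttling idea concrete, here is a minimal userspace sketch (not the kernel code, and not part of this change) that stands in pthreads and a detached helper thread for delayed_work: the hot logging path never wakes pollers itself, it only arms at most one deferred wakeup per 16ms window. The file name throttle.c and the names log_block_read() and delayed_wakeup() are illustrative only and just mirror the patch for readability.

/*
 * Throttled-wakeup sketch: log bursts of "reads" but wake pollers at most
 * once per 16ms window.  wakeup_pending plays the role of the queued
 * delayed_work; the detached thread plays the role of the workqueue.
 * Build with:  cc -O2 -pthread throttle.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool wakeup_pending;   /* a wakeup is already scheduled */
static atomic_long wakeups_fired;    /* how often pollers were woken  */
static atomic_long records_logged;   /* how many reads were logged    */

/* Deferred work: wait ~16ms, then do the expensive wakeup exactly once. */
static void *delayed_wakeup(void *arg)
{
	(void)arg;
	usleep(16 * 1000);
	atomic_store(&wakeup_pending, false);
	atomic_fetch_add(&wakeups_fired, 1);   /* wake_up_all() would go here */
	return NULL;
}

/* Hot path: record the read, arm a wakeup only if none is pending yet. */
static void log_block_read(void)
{
	bool expected = false;
	pthread_t t;

	atomic_fetch_add(&records_logged, 1);
	if (atomic_compare_exchange_strong(&wakeup_pending, &expected, true)) {
		pthread_create(&t, NULL, delayed_wakeup, NULL);
		pthread_detach(t);
	}
}

int main(void)
{
	/* Five bursts of heavy reading: every record is logged, but only a
	 * handful of wakeups (roughly one per 16ms window) are ever issued. */
	for (int burst = 0; burst < 5; burst++) {
		for (int i = 0; i < 200000; i++)
			log_block_read();
		usleep(20 * 1000);
	}
	usleep(20 * 1000);   /* let the last pending wakeup fire */
	printf("logged %ld records, issued %ld wakeups\n",
	       atomic_load(&records_logged), atomic_load(&wakeups_fired));
	return 0;
}

The patch gets the same effect from schedule_delayed_work(): calling it while the work is already queued is a cheap no-op, so the logging path pays for at most one wakeup per 16ms instead of one per logged record.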

fs/incfs/data_mgmt.c

@@ -8,6 +8,7 @@
 #include <linux/file.h>
 #include <linux/ktime.h>
 #include <linux/mm.h>
+#include <linux/workqueue.h>
 #include <linux/pagemap.h>
 #include <linux/lz4.h>
 #include <linux/crc32.h>
@@ -16,6 +17,13 @@
 #include "format.h"
 #include "integrity.h"
 
+static void log_wake_up_all(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work);
+	wake_up_all(&rl->ml_notif_wq);
+}
+
 struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
 					  struct mount_options *options,
 					  struct path *backing_dir_path)
@@ -35,6 +43,7 @@ struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
 	mutex_init(&mi->mi_pending_reads_mutex);
 	init_waitqueue_head(&mi->mi_pending_reads_notif_wq);
 	init_waitqueue_head(&mi->mi_log.ml_notif_wq);
+	INIT_DELAYED_WORK(&mi->mi_log.ml_wakeup_work, log_wake_up_all);
 	spin_lock_init(&mi->mi_log.rl_lock);
 	INIT_LIST_HEAD(&mi->mi_reads_list_head);
@@ -95,6 +104,8 @@
 	if (!mi)
 		return;
 
+	flush_delayed_work(&mi->mi_log.ml_wakeup_work);
+
 	dput(mi->mi_index_dir);
 	path_put(&mi->mi_backing_dir_path);
 	mutex_destroy(&mi->mi_dir_struct_mutex);
@@ -297,11 +308,20 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
 {
 	struct read_log *log = &mi->mi_log;
 	struct read_log_state *head, *tail;
-	s64 now_us = ktime_to_us(ktime_get());
+	s64 now_us;
 	s64 relative_us;
 	union log_record record;
 	size_t record_size;
 
+	/*
+	 * This may read the old value, but it's OK to delay the logging start
+	 * right after the configuration update.
+	 */
+	if (READ_ONCE(log->rl_size) == 0)
+		return;
+
+	now_us = ktime_to_us(ktime_get());
+
 	spin_lock(&log->rl_lock);
 	if (log->rl_size == 0) {
 		spin_unlock(&log->rl_lock);
@@ -320,6 +340,7 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
 			.file_id = *id,
 			.absolute_ts_us = now_us,
 		};
+		head->base_record.file_id = *id;
 		record_size = sizeof(struct full_record);
 	} else if (block_index != head->base_record.block_index + 1 ||
 		   relative_us >= 1 << 30) {
@@ -344,7 +365,6 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
 		record_size = sizeof(struct same_file_next_block_short);
 	}
 
-	head->base_record.file_id = *id;
 	head->base_record.block_index = block_index;
 	head->base_record.absolute_ts_us = now_us;
@@ -363,7 +383,8 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
 	++head->current_record_no;
 
 	spin_unlock(&log->rl_lock);
-	wake_up_all(&log->ml_notif_wq);
+	if (schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16)))
+		pr_debug("incfs: scheduled a log pollers wakeup");
 }
 
 static int validate_hash_tree(struct file *bf, struct data_file *df,

fs/incfs/data_mgmt.h

@@ -89,6 +89,9 @@ struct read_log {
 	/* A queue of waiters who want to be notified about reads */
 	wait_queue_head_t ml_notif_wq;
+
+	/* A work item to wake up those waiters without slowing down readers */
+	struct delayed_work ml_wakeup_work;
 };
 
 struct mount_options {