android_kernel_motorola_sm6225/kernel/locking/rwsem.h
Maria Yu c4b6927bb6 locking/rwsem: for rwsem prio aware enhancement
When adding a waiter to the wait list, be prio aware:
a lower prio value means a higher priority task, which
will get the lock before lower priority tasks.
Only waiters whose task priority is higher than
DEFAULT_PRIO (i.e. a prio value below it) may preempt
their way into the queue.
To avoid starvation, keep a count of how many waiters
have preempted into the wait list. If the preempt count
exceeds MAX_PREEMPT_ALLOWED, fall back to simple FIFO
queueing until the wait list is empty.

Change-Id: I4d5fe6a823a16c9762e2e2f416d34bdd701341c4
Signed-off-by: Maria Yu <aiquny@codeaurora.org>
Signed-off-by: Biao long <blong@codeaurora.org>
2019-05-24 15:32:11 +08:00
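
The queueing rule above can be illustrated with a small, self-contained
userspace sketch (an editorial illustration, not part of the patch; the
DEFAULT_PRIO value of 120 and the preempt budget mirror the kernel's
constants). Waiters with a prio value below DEFAULT_PRIO are inserted in
front of the first queued waiter with a larger prio value, a counter
tracks how many such preemptions happened, and everything else is
appended FIFO:

#include <stdio.h>

#define DEFAULT_PRIO		120	/* kernel default task prio (nice 0) */
#define MAX_PREEMPT_ALLOWED	3000	/* mirrors RWSEM_MAX_PREEMPT_ALLOWED */

int main(void)
{
	/* prio values in arrival order; lower value = higher priority */
	int arrival[] = { 120, 130, 100, 110, 139 };
	int queue[8], n = 0, preempt_count = 0;

	for (int i = 0; i < 5; i++) {
		int prio = arrival[i], pos = n;

		if (prio < DEFAULT_PRIO && preempt_count < MAX_PREEMPT_ALLOWED) {
			/* insert before the first waiter with a larger prio value */
			for (pos = 0; pos < n; pos++)
				if (queue[pos] > prio)
					break;
			if (pos < n)
				preempt_count++;	/* an actual preemption happened */
		}

		for (int j = n; j > pos; j--)	/* make room at slot pos */
			queue[j] = queue[j - 1];
		queue[pos] = prio;
		n++;
	}

	for (int i = 0; i < n; i++)
		printf("%d ", queue[i]);	/* prints: 100 110 120 130 139 */
	printf("\n");
	return 0;
}

The two high priority waiters (prio 100 and 110) end up ahead of the
earlier-arrived default and low priority waiters, which stay in FIFO
order among themselves.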


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The owner field of the rw_semaphore structure will be set to
 * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
 * the owner field when it unlocks. A reader, on the other hand, will
 * not touch the owner field when it unlocks.
 *
 * In essence, the owner field now has the following 4 states:
 *  1) 0
 *     - lock is free or the owner hasn't set the field yet
 *  2) RWSEM_READER_OWNED
 *     - lock is currently or previously owned by readers (lock is free
 *       or not set by owner yet)
 *  3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well
 *     - lock is owned by an anonymous writer, so spinning on the lock
 *       owner should be disabled.
 *  4) Other non-zero value
 *     - a writer owns the lock and other writers can spin on the lock owner.
 */
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 0)
#define RWSEM_READER_OWNED	((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
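
/*
 * Editorial illustration (assumption, not part of the original file):
 * task_struct pointers are at least word aligned, so bit 0 of a real
 * writer's owner pointer is always clear. Encoding RWSEM_READER_OWNED as
 * the value 1 therefore makes reader ownership a special case of
 * "anonymously owned" (bit 0 set), while states 1) and 4) above remain
 * plain pointer values (NULL or the owning writer's task_struct).
 */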
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
#else
# define DEBUG_RWSEMS_WARN_ON(c)
#endif
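
/*
 * Hedged usage note (assumption): callers in kernel/locking/rwsem.c use
 * this macro for ownership sanity checks, e.g. up_write() may assert
 * DEBUG_RWSEMS_WARN_ON(sem->owner != current) before releasing the lock.
 * With CONFIG_DEBUG_RWSEMS disabled the check compiles away entirely.
 */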

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Reads from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	/*
	 * We check the owner value first to make sure that we will only
	 * do a write to the rwsem cacheline when it is really necessary
	 * to minimize cacheline contention.
	 */
	if (READ_ONCE(sem->owner) != RWSEM_READER_OWNED)
		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
}
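
/*
 * Hedged note (assumption): reader acquisition paths in
 * kernel/locking/rwsem.c call rwsem_set_reader_owned() right after the
 * reader count is taken, so repeated reader acquisitions usually hit the
 * early-out above and avoid dirtying the owner cacheline again.
 */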

/*
 * Return true if a rwsem waiter can spin on the rwsem's owner
 * and steal the lock, i.e. the lock is not anonymously owned.
 * N.B. !owner is considered spinnable.
 */
static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
{
	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}

/*
 * Return true if the rwsem is owned by an anonymous writer or readers.
 */
static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}
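
/*
 * Hedged sketch (assumption, not part of this header): how an optimistic
 * spinner such as rwsem_can_spin_on_owner() in rwsem-xadd.c typically
 * consults the helpers above. The function name below is hypothetical and
 * the owner_on_cpu()-style check is simplified to owner->on_cpu (SMP).
 */
static inline bool rwsem_spin_looks_worthwhile(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		/* Only spin on a real, currently running writer. */
		ret = is_rwsem_owner_spinnable(owner) && owner->on_cpu;
	}
	rcu_read_unlock();

	return ret;
}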
#else
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif

#ifdef CONFIG_RWSEM_PRIO_AWARE

#define RWSEM_MAX_PREEMPT_ALLOWED 3000

/*
 * Return true if the current waiter was added at the front of the rwsem
 * wait list.
 */
static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
					   struct rw_semaphore *sem)
{
	struct list_head *pos;
	struct list_head *head;
	struct rwsem_waiter *waiter = NULL;

	pos = head = &sem->wait_list;

	/*
	 * Rules for task prio aware rwsem wait list queueing:
	 * 1: Only waiters whose task priority is higher than DEFAULT_PRIO
	 *    (i.e. prio value below it) try to preempt queued waiters.
	 * 2: To avoid starvation, a count records how many high priority
	 *    waiters have preempted into the wait list. Once the count
	 *    exceeds RWSEM_MAX_PREEMPT_ALLOWED, use simple FIFO queueing
	 *    until the wait list is empty.
	 */
	if (list_empty(head)) {
		list_add_tail(&waiter_in->list, head);
		sem->m_count = 0;
		return true;
	}

	if (waiter_in->task->prio < DEFAULT_PRIO
		&& sem->m_count < RWSEM_MAX_PREEMPT_ALLOWED) {

		list_for_each(pos, head) {
			waiter = list_entry(pos, struct rwsem_waiter, list);
			if (waiter->task->prio > waiter_in->task->prio) {
				list_add(&waiter_in->list, pos->prev);
				sem->m_count++;
				return &waiter_in->list == head->next;
			}
		}
	}

	list_add_tail(&waiter_in->list, head);
	return false;
}
#else
static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
					   struct rw_semaphore *sem)
{
	list_add_tail(&waiter_in->list, &sem->wait_list);
	return false;
}
#endif
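
/*
 * Hedged usage sketch (assumption, not part of this header): the rough
 * shape of a writer slow path in rwsem-xadd.c after the prio-aware
 * change. The function name is hypothetical; atomic count handling,
 * wake-ups and the schedule loop are omitted, only the queueing step and
 * the use of the "first waiter" hint are shown.
 */
static inline bool rwsem_queue_writer_sketch(struct rw_semaphore *sem,
					     struct rwsem_waiter *waiter)
{
	bool is_first_waiter;

	waiter->task = current;
	waiter->type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);
	/* Prio-aware insertion; true means this waiter is now at the head. */
	is_first_waiter = rwsem_list_add_per_prio(waiter, sem);
	raw_spin_unlock_irq(&sem->wait_lock);

	/*
	 * A head-of-list waiter may try for the lock ahead of older,
	 * lower priority waiters; everyone else sleeps in queue order.
	 */
	return is_first_waiter;
}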