Merge "mhi: core: improve time synchronization events handling"

This commit is contained in:
qctecmdr 2020-05-01 16:16:54 -07:00 committed by Gerrit - the friendly Code Review server
commit c051a75c99
4 changed files with 97 additions and 76 deletions

View file

@ -1225,7 +1225,7 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
mhi_event->process_event = mhi_process_ctrl_ev_ring;
break;
case MHI_ER_TSYNC_ELEMENT_TYPE:
mhi_event->process_event = mhi_process_tsync_event_ring;
mhi_event->process_event = mhi_process_tsync_ev_ring;
break;
case MHI_ER_BW_SCALE_ELEMENT_TYPE:
mhi_event->process_event = mhi_process_bw_scale_ev_ring;

View file

@ -717,8 +717,6 @@ struct mhi_chan {
struct tsync_node {
struct list_head node;
u32 sequence;
u32 int_sequence;
u64 local_time;
u64 remote_time;
struct mhi_device *mhi_dev;
void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence,
@ -728,7 +726,9 @@ struct tsync_node {
struct mhi_timesync {
void __iomem *time_reg;
u32 int_sequence;
u64 local_time;
bool db_support;
bool db_response_pending;
spinlock_t lock; /* list protection */
struct list_head head;
};
@ -787,8 +787,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,

View file

@ -1347,82 +1347,89 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
return count;
}
int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
u32 event_quota)
int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
u32 event_quota)
{
struct mhi_tre *dev_rp, *local_rp;
struct mhi_tre *dev_rp;
struct mhi_ring *ev_ring = &mhi_event->ring;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
int count = 0;
u32 int_sequence, unit;
u32 sequence;
u64 remote_time;
int ret = 0;
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
MHI_LOG("No EV access, PM_STATE:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state));
return -EIO;
}
spin_lock_bh(&mhi_event->lock);
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
struct tsync_node *tsync_node;
MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");
int_sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp);
unit = MHI_TRE_GET_EV_TSYNC_UNIT(local_rp);
remote_time = MHI_TRE_GET_EV_TIME(local_rp);
do {
spin_lock(&mhi_tsync->lock);
tsync_node = list_first_entry_or_null(&mhi_tsync->head,
struct tsync_node, node);
if (!tsync_node) {
spin_unlock(&mhi_tsync->lock);
break;
}
list_del(&tsync_node->node);
spin_unlock(&mhi_tsync->lock);
/*
* the device may not be able to process every time sync command the
* host issues; it may only process the last command it received
*/
if (tsync_node->int_sequence == int_sequence) {
tsync_node->cb_func(tsync_node->mhi_dev,
tsync_node->sequence,
tsync_node->local_time,
remote_time);
kfree(tsync_node);
} else {
kfree(tsync_node);
}
} while (true);
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
count++;
if (ev_ring->rp == dev_rp) {
spin_unlock_bh(&mhi_event->lock);
goto exit_tsync_process;
}
/* if rp points to base, we need to wrap it around */
if (dev_rp == ev_ring->base)
dev_rp = ev_ring->base + ev_ring->len;
dev_rp--;
/* fast forward to currently processed element and recycle er */
ev_ring->rp = dev_rp;
ev_ring->wp = dev_rp - 1;
if (ev_ring->wp < ev_ring->base)
ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring);
MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_TSYNC_EVENT,
"!TSYNC event");
sequence = MHI_TRE_GET_EV_TSYNC_SEQ(dev_rp);
remote_time = MHI_TRE_GET_EV_TIME(dev_rp);
MHI_VERB("Received TSYNC event with seq:0x%llx time:0x%llx\n",
sequence, remote_time);
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
spin_unlock_bh(&mhi_event->lock);
mutex_lock(&mhi_cntrl->tsync_mutex);
if (unlikely(mhi_tsync->int_sequence != sequence)) {
MHI_ASSERT(1, "Unexpected response:0x%llx Expected:0x%llx\n",
sequence, mhi_tsync->int_sequence);
mutex_unlock(&mhi_cntrl->tsync_mutex);
goto exit_tsync_process;
}
do {
struct tsync_node *tsync_node;
spin_lock(&mhi_tsync->lock);
tsync_node = list_first_entry_or_null(&mhi_tsync->head,
struct tsync_node, node);
if (!tsync_node) {
spin_unlock(&mhi_tsync->lock);
break;
}
list_del(&tsync_node->node);
spin_unlock(&mhi_tsync->lock);
tsync_node->cb_func(tsync_node->mhi_dev,
tsync_node->sequence,
mhi_tsync->local_time, remote_time);
kfree(tsync_node);
} while (true);
mhi_tsync->db_response_pending = false;
mutex_unlock(&mhi_cntrl->tsync_mutex);
exit_tsync_process:
MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
return count;
return ret;
}
int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
@ -2575,7 +2582,7 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
struct tsync_node *tsync_node;
int ret;
int ret = 0;
/* not all devices support all time features */
mutex_lock(&mhi_cntrl->tsync_mutex);
@ -2599,6 +2606,10 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
}
read_unlock_bh(&mhi_cntrl->pm_lock);
MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state));
/*
* technically we can use GFP_KERNEL, but we want to avoid being
* scheduled out multiple times
@ -2609,15 +2620,17 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
goto error_no_mem;
}
tsync_node->sequence = sequence;
tsync_node->cb_func = cb_func;
tsync_node->mhi_dev = mhi_dev;
if (mhi_tsync->db_response_pending)
goto skip_tsync_db;
mhi_tsync->int_sequence++;
if (mhi_tsync->int_sequence == 0xFFFFFFFF)
mhi_tsync->int_sequence = 0;
tsync_node->sequence = sequence;
tsync_node->int_sequence = mhi_tsync->int_sequence;
tsync_node->cb_func = cb_func;
tsync_node->mhi_dev = mhi_dev;
/* disable link level low power modes */
ret = mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
if (ret) {
@ -2626,10 +2639,6 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
goto error_invalid_state;
}
spin_lock(&mhi_tsync->lock);
list_add_tail(&tsync_node->node, &mhi_tsync->head);
spin_unlock(&mhi_tsync->lock);
/*
* time critical code, delay between these two steps should be
* deterministic as possible.
@ -2637,9 +2646,9 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
preempt_disable();
local_irq_disable();
tsync_node->local_time =
mhi_tsync->local_time =
mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
writel_relaxed_no_log(tsync_node->int_sequence, mhi_cntrl->tsync_db);
writel_relaxed_no_log(mhi_tsync->int_sequence, mhi_cntrl->tsync_db);
/* write must go thru immediately */
wmb();
@ -2648,6 +2657,15 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
MHI_VERB("time DB request with seq:0x%llx\n", mhi_tsync->int_sequence);
mhi_tsync->db_response_pending = true;
skip_tsync_db:
spin_lock(&mhi_tsync->lock);
list_add_tail(&tsync_node->node, &mhi_tsync->head);
spin_unlock(&mhi_tsync->lock);
ret = 0;
error_invalid_state:

View file

@ -824,6 +824,9 @@ void mhi_special_purpose_work(struct work_struct *work)
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
return;
/* check special purpose event rings and process events */
list_for_each_entry(mhi_event, &mhi_cntrl->sp_ev_rings, node)
mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);