Merge "msm: npu: Send IPCC irq directly when the apps processor crashes"

This commit is contained in:
qctecmdr 2020-04-18 05:01:13 -07:00 committed by Gerrit - the friendly Code Review server
commit a0c52ad452
4 changed files with 239 additions and 5 deletions

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _NPU_COMMON_H
@ -341,5 +341,7 @@ int load_fw(struct npu_device *npu_dev);
int unload_fw(struct npu_device *npu_dev);
int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab);
int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt);
int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable);
int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx,
struct npu_mbox *mbox, void *data);
#endif /* _NPU_COMMON_H */

View file

@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include <linux/soc/qcom/llcc-qcom.h>
#include <linux/soc/qcom/cdsprm_cxlimit.h>
#include <soc/qcom/devfreq_devbw.h>
#include "npu_common.h"
@ -111,7 +112,8 @@ static int npu_pm_suspend(struct device *dev);
static int npu_pm_resume(struct device *dev);
static int __init npu_init(void);
static void __exit npu_exit(void);
static uint32_t npu_notify_cdsprm_cxlimit_corner(struct npu_device *npu_dev,
uint32_t pwr_lvl);
/* -------------------------------------------------------------------------
* File Scope Variables
* -------------------------------------------------------------------------
@ -387,6 +389,168 @@ static ssize_t boot_store(struct device *dev,
* Power Related
* -------------------------------------------------------------------------
*/
/*
 * Translate a cdsprm CX-limit NPU corner into the NPU driver's own
 * power-level enum.  Unknown corners (and TURBO_L1) map to the highest
 * level, NPU_PWRLEVEL_TURBO_L1.
 */
static enum npu_power_level cdsprm_corner_to_npu_power_level(
	enum cdsprm_npu_corner corner)
{
	switch (corner) {
	case CDSPRM_NPU_CLK_OFF:
		return NPU_PWRLEVEL_OFF;
	case CDSPRM_NPU_MIN_SVS:
		return NPU_PWRLEVEL_MINSVS;
	case CDSPRM_NPU_LOW_SVS:
		return NPU_PWRLEVEL_LOWSVS;
	case CDSPRM_NPU_SVS:
		return NPU_PWRLEVEL_SVS;
	case CDSPRM_NPU_SVS_L1:
		return NPU_PWRLEVEL_SVS_L1;
	case CDSPRM_NPU_NOM:
		return NPU_PWRLEVEL_NOM;
	case CDSPRM_NPU_NOM_L1:
		return NPU_PWRLEVEL_NOM_L1;
	case CDSPRM_NPU_TURBO:
		return NPU_PWRLEVEL_TURBO;
	case CDSPRM_NPU_TURBO_L1:
	default:
		return NPU_PWRLEVEL_TURBO_L1;
	}
}
/*
 * Inverse of cdsprm_corner_to_npu_power_level(): translate an NPU
 * power level into the corresponding cdsprm NPU corner.  Unknown
 * levels (and TURBO_L1) map to CDSPRM_NPU_TURBO_L1.
 */
static enum cdsprm_npu_corner npu_power_level_to_cdsprm_corner(
	enum npu_power_level pwr_lvl)
{
	switch (pwr_lvl) {
	case NPU_PWRLEVEL_OFF:
		return CDSPRM_NPU_CLK_OFF;
	case NPU_PWRLEVEL_MINSVS:
		return CDSPRM_NPU_MIN_SVS;
	case NPU_PWRLEVEL_LOWSVS:
		return CDSPRM_NPU_LOW_SVS;
	case NPU_PWRLEVEL_SVS:
		return CDSPRM_NPU_SVS;
	case NPU_PWRLEVEL_SVS_L1:
		return CDSPRM_NPU_SVS_L1;
	case NPU_PWRLEVEL_NOM:
		return CDSPRM_NPU_NOM;
	case NPU_PWRLEVEL_NOM_L1:
		return CDSPRM_NPU_NOM_L1;
	case NPU_PWRLEVEL_TURBO:
		return CDSPRM_NPU_TURBO;
	case NPU_PWRLEVEL_TURBO_L1:
	default:
		return CDSPRM_NPU_TURBO_L1;
	}
}
/*
 * cdsprm CX-limit callback: record the power-level cap requested by
 * CDSP and re-evaluate the active power level against it.  Returns 0
 * harmlessly if the driver has not finished probing (g_npu_dev unset).
 */
static int npu_set_cdsprm_corner_limit(enum cdsprm_npu_corner corner)
{
	struct npu_pwrctrl *pwr;

	if (!g_npu_dev)
		return 0;

	pwr = &g_npu_dev->pwrctrl;
	pwr->cdsprm_pwrlevel = cdsprm_corner_to_npu_power_level(corner);
	NPU_DBG("power level from cdsp %d\n", pwr->cdsprm_pwrlevel);

	return npu_set_power_level(g_npu_dev, false);
}
/*
 * Callback table handed to the cdsprm CX-limit driver via
 * cdsprm_cxlimit_npu_limit_register() in npu_cdsprm_cxlimit_init().
 * NOTE(review): no other translation unit appears to reference this;
 * it could likely be made static — confirm before changing linkage.
 */
const struct cdsprm_npu_limit_cbs cdsprm_npu_limit_cbs = {
.set_corner_limit = npu_set_cdsprm_corner_limit,
};
/*
 * Tell the cdsprm CX-limit driver whether the NPU has active work
 * (network execution in flight).  No-op (returns 0) when CX-limit
 * support was not registered at init.
 */
int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable)
{
	const char *action = enable ? "enable" : "disable";

	if (!npu_dev->cxlimit_registered)
		return 0;

	NPU_DBG("notify cxlimit %s activity\n", action);

	return cdsprm_cxlimit_npu_activity_notify(enable ? 1 : 0);
}
/*
 * Ask the cdsprm CX-limit driver to approve a requested power level.
 * The request is translated to a corner, submitted, and the corner
 * granted by cdsprm is translated back to a power level, which is
 * returned.  If CX-limit support is not registered the request is
 * returned unchanged.
 */
static uint32_t npu_notify_cdsprm_cxlimit_corner(
	struct npu_device *npu_dev, uint32_t pwr_lvl)
{
	enum cdsprm_npu_corner requested, granted;
	uint32_t allowed;

	if (!npu_dev->cxlimit_registered)
		return pwr_lvl;

	requested = npu_power_level_to_cdsprm_corner(pwr_lvl);
	granted = cdsprm_cxlimit_npu_corner_notify(requested);
	allowed = cdsprm_corner_to_npu_power_level(granted);
	NPU_DBG("Notify cdsprm %d:%d\n", pwr_lvl,
		allowed);

	return allowed;
}
/*
 * Register the NPU with the cdsprm CX-limit driver if the devicetree
 * opts in via "qcom,npu-cxlimit-enable".  Sets npu_dev->cxlimit_registered
 * on success so the notify/corner helpers become active.  Returns 0 when
 * the feature is disabled or registration succeeds, otherwise the error
 * from cdsprm_cxlimit_npu_limit_register().
 */
int npu_cdsprm_cxlimit_init(struct npu_device *npu_dev)
{
	bool enabled;
	int ret = 0;

	enabled = of_property_read_bool(npu_dev->pdev->dev.of_node,
		"qcom,npu-cxlimit-enable");
	/* Fix: log used to say "npu-xclimit-enable", which is not the
	 * property actually read above.
	 */
	NPU_DBG("qcom,npu-cxlimit-enable is %s\n", enabled ? "true" : "false");

	npu_dev->cxlimit_registered = false;
	if (enabled) {
		ret = cdsprm_cxlimit_npu_limit_register(&cdsprm_npu_limit_cbs);
		if (ret) {
			NPU_ERR("register cxlimit npu limit failed\n");
		} else {
			NPU_DBG("register cxlimit npu limit succeeds\n");
			npu_dev->cxlimit_registered = true;
		}
	}

	return ret;
}
/*
 * Undo npu_cdsprm_cxlimit_init(): deregister from the cdsprm CX-limit
 * driver if (and only if) init registered.  Always clears the
 * cxlimit_registered flag; returns the deregistration result, or 0 if
 * nothing was registered.
 */
int npu_cdsprm_cxlimit_deinit(struct npu_device *npu_dev)
{
	int ret;

	if (!npu_dev->cxlimit_registered)
		return 0;

	ret = cdsprm_cxlimit_npu_limit_deregister();
	if (ret)
		NPU_ERR("deregister cxlimit npu limit failed\n");
	npu_dev->cxlimit_registered = false;

	return ret;
}
int npu_enable_core_power(struct npu_device *npu_dev)
{
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
@ -530,6 +694,11 @@ int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit)
return 0;
}
/* notify cxlimit to get allowed power level */
if ((pwr_level_to_set > pwr->active_pwrlevel) && notify_cxlimit)
pwr_level_to_set = npu_notify_cdsprm_cxlimit_corner(
npu_dev, pwr_level_to_cdsprm);
pwr_level_to_set = min(pwr_level_to_set,
npu_dev->pwrctrl.cdsprm_pwrlevel);
@ -596,6 +765,12 @@ int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit)
ret = 0;
}
if ((pwr_level_to_cdsprm < pwr->active_pwrlevel) && notify_cxlimit) {
npu_notify_cdsprm_cxlimit_corner(npu_dev,
pwr_level_to_cdsprm);
NPU_DBG("Notify cdsprm(post) %d\n", pwr_level_to_cdsprm);
}
pwr->active_pwrlevel = pwr_level_to_set;
return ret;
}
@ -708,6 +883,13 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
uint32_t pwrlevel_to_set, pwrlevel_idx;
pwrlevel_to_set = pwr->active_pwrlevel;
if (!post_pil) {
pwrlevel_to_set = npu_notify_cdsprm_cxlimit_corner(
npu_dev, pwrlevel_to_set);
NPU_DBG("Notify cdsprm %d\n", pwrlevel_to_set);
pwr->active_pwrlevel = pwrlevel_to_set;
}
pwrlevel_idx = npu_power_level_to_index(npu_dev, pwrlevel_to_set);
pwrlevel = &pwr->pwrlevels[pwrlevel_idx];
for (i = 0; i < npu_dev->core_clk_num; i++) {
@ -775,6 +957,11 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
int i, rc = 0;
struct npu_clk *core_clks = npu_dev->core_clks;
if (!post_pil) {
npu_notify_cdsprm_cxlimit_corner(npu_dev, NPU_PWRLEVEL_OFF);
NPU_DBG("Notify cdsprm clock off\n");
}
for (i = npu_dev->core_clk_num - 1; i >= 0 ; i--) {
if (post_pil) {
if (!npu_is_post_clock(core_clks[i].clk_name))
@ -2012,6 +2199,10 @@ static int npu_ipcc_bridge_mbox_send_data(struct mbox_chan *chan, void *data)
queue_work(host_ctx->wq, &host_ctx->bridge_mbox_work);
spin_unlock_irqrestore(&host_ctx->bridge_mbox_lock, flags);
if (host_ctx->app_crashed)
npu_bridge_mbox_send_data(host_ctx,
ipcc_mbox_chan->npu_mbox, NULL);
return 0;
}
@ -2459,10 +2650,15 @@ static int npu_probe(struct platform_device *pdev)
thermal_cdev_update(tcdev);
}
rc = npu_cdsprm_cxlimit_init(npu_dev);
if (rc)
goto error_driver_init;
g_npu_dev = npu_dev;
return rc;
error_driver_init:
npu_cdsprm_cxlimit_deinit(npu_dev);
if (npu_dev->tcdev)
thermal_cooling_device_unregister(npu_dev->tcdev);
sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group);
@ -2487,6 +2683,7 @@ static int npu_remove(struct platform_device *pdev)
npu_dev = platform_get_drvdata(pdev);
npu_host_deinit(npu_dev);
npu_debugfs_deinit(npu_dev);
npu_cdsprm_cxlimit_deinit(npu_dev);
if (npu_dev->tcdev)
thermal_cooling_device_unregister(npu_dev->tcdev);
sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group);

View file

@ -672,6 +672,25 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
return ret;
}
static int npu_panic_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
int i;
struct npu_host_ctx *host_ctx =
container_of(this, struct npu_host_ctx, panic_nb);
struct npu_device *npu_dev = host_ctx->npu_dev;
NPU_INFO("Apps crashed\n");
for (i = 0; i < NPU_MAX_MBOX_NUM; i++)
if (npu_dev->mbox[i].send_data_pending)
npu_bridge_mbox_send_data(host_ctx,
&npu_dev->mbox[i], NULL);
host_ctx->app_crashed = true;
return NOTIFY_DONE;
}
static void npu_update_pwr_work(struct work_struct *work)
{
int ret;
@ -724,6 +743,14 @@ int npu_host_init(struct npu_device *npu_dev)
goto fail;
}
host_ctx->panic_nb.notifier_call = npu_panic_handler;
ret = atomic_notifier_chain_register(&panic_notifier_list,
&host_ctx->panic_nb);
if (ret) {
NPU_ERR("register panic notifier failed\n");
goto fail;
}
host_ctx->wq = create_workqueue("npu_general_wq");
host_ctx->wq_pri =
alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
@ -1104,7 +1131,7 @@ static void npu_disable_fw_work(struct work_struct *work)
NPU_DBG("Exit disable fw work\n");
}
static int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx,
int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx,
struct npu_mbox *mbox, void *data)
{
NPU_DBG("Generating IRQ for client_id: %u; signal_id: %u\n",
@ -2697,6 +2724,9 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
return -EINVAL;
}
if (atomic_inc_return(&host_ctx->network_execute_cnt) == 1)
npu_notify_cdsprm_cxlimit_activity(npu_dev, true);
if (!network->is_active) {
NPU_ERR("network is not active\n");
ret = -EINVAL;
@ -2847,6 +2877,9 @@ exec_v2_done:
host_error_hdlr(npu_dev, true);
}
if (atomic_dec_return(&host_ctx->network_execute_cnt) == 0)
npu_notify_cdsprm_cxlimit_activity(npu_dev, false);
return ret;
}

View file

@ -126,13 +126,15 @@ struct npu_host_ctx {
uint32_t fw_dbg_mode;
uint32_t exec_flags_override;
atomic_t ipc_trans_id;
atomic_t network_exeute_cnt;
atomic_t network_execute_cnt;
uint32_t err_irq_sts;
uint32_t wdg_irq_sts;
bool fw_error;
bool cancel_work;
bool app_crashed;
struct notifier_block nb;
struct notifier_block panic_nb;
void *notif_hdle;
spinlock_t bridge_mbox_lock;
bool bridge_mbox_pwr_on;