Merge tag 'LA.UM.9.12.r1-15600-SMxx50.0' of https://git.codelinaro.org/clo/la/platform/vendor/opensource/camera-kernel into android13-4.19-kona

"LA.UM.9.12.r1-15600-SMxx50.0"

* tag 'LA.UM.9.12.r1-15600-SMxx50.0' of https://git.codelinaro.org/clo/la/platform/vendor/opensource/camera-kernel:
  msm: camera: mem_mgr: release buffers after usage
  msm: camera: sensor: Add changes to prevent unmap buffers
  msm: camera: mem_mgr: Add refcount to track in use buffers
  msm: camera: core: validation of session/device/link handle
  msm: camera: cci: Fix some cci stability issues
  msm: camera: cci: Move load report cmd in lock context
  msm: camera: cci: Add report id in report command for CCI I2C queue

Change-Id: If0a91190c9d87eeca85d6d4c5572d57c17ce333d
Michael Bestas 2023-08-25 12:04:44 +03:00
commit 4b455c21d5
29 changed files with 617 additions and 187 deletions
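The common thread through these changes is reference-counted CPU access to camera buffers: every cam_mem_get_cpu_buf() is now paired with a cam_mem_put_cpu_buf() on all exit paths, backed by a kref in the buffer table (see the cam_mem_mgr diffs below). A minimal usage sketch of that pattern follows; the function name and its parameters are hypothetical, only the two cam_mem_* calls and their signatures come from this series.

#include "cam_mem_mgr_api.h"

/* Sketch only: my_parse_cmd() and its arguments are illustrative. */
static int my_parse_cmd(int32_t buf_handle, uint64_t offset)
{
	uintptr_t cpu_addr = 0;
	size_t len = 0;
	int rc;

	rc = cam_mem_get_cpu_buf(buf_handle, &cpu_addr, &len);
	if (rc || !cpu_addr)
		return rc ? rc : -EINVAL;	/* nothing mapped, nothing to put */

	if (offset >= len) {
		cam_mem_put_cpu_buf(buf_handle);	/* release on every error path */
		return -EINVAL;
	}

	/* ... consume the mapping at cpu_addr + offset ... */

	cam_mem_put_cpu_buf(buf_handle);	/* drops the reference taken above */
	return 0;
}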

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@ -115,7 +116,7 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
cdm_cmd->cmd[i].len) {
CAM_ERR(CAM_CDM, "Not enough buffer");
rc = -EINVAL;
break;
goto end;
}
CAM_DBG(CAM_CDM,
"hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
@ -133,7 +134,7 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
"write failed for cnt=%d:%d len %u",
i, req->data->cmd_arrary_count,
cdm_cmd->cmd[i].len);
break;
goto end;
}
} else {
CAM_ERR(CAM_CDM,
@ -144,7 +145,7 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
"Sanity check failed for cmd_count=%d cnt=%d",
i, req->data->cmd_arrary_count);
rc = -EINVAL;
break;
goto end;
}
if (!rc) {
struct cam_cdm_work_payload *payload;
@ -161,7 +162,7 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
GFP_KERNEL);
if (!node) {
rc = -ENOMEM;
break;
goto end;
}
node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
node->client_hdl = req->handle;
@ -193,9 +194,20 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
if (!rc && (core->bl_tag == 63))
core->bl_tag = 0;
}
if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE)
cam_mem_put_cpu_buf(cdm_cmd->cmd[i].bl_addr.mem_handle);
}
mutex_unlock(&client->lock);
return rc;
end:
if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE)
cam_mem_put_cpu_buf(cdm_cmd->cmd[i].bl_addr.mem_handle);
mutex_unlock(&client->lock);
return rc;
}
int cam_virtual_cdm_probe(struct platform_device *pdev)
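The hunks above convert each early break into goto end so that the CPU mapping taken for CAM_CDM_BL_CMD_TYPE_MEM_HANDLE entries is released before cam_virtual_cdm_submit_bl() returns. Condensed to its control flow (the mapping, validation and submit body is elided, and the individual error checks are collapsed into a single rc test), the resulting cleanup idiom is:

	for (i = 0; i < req->data->cmd_arrary_count; i++) {
		/* ... map, validate and submit cdm_cmd->cmd[i] ... */
		if (rc)
			goto end;	/* was "break"; now jumps to the matching put */
		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE)
			cam_mem_put_cpu_buf(cdm_cmd->cmd[i].bl_addr.mem_handle);
	}
	mutex_unlock(&client->lock);
	return rc;
end:
	if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE)
		cam_mem_put_cpu_buf(cdm_cmd->cmd[i].bl_addr.mem_handle);
	mutex_unlock(&client->lock);
	return rc;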

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/debugfs.h>
@ -300,6 +301,7 @@ int32_t cam_context_config_dev_to_hw(
(cmd->offset >= (len - sizeof(struct cam_packet)))) {
CAM_ERR(CAM_CTXT, "Not enough buf, len : %zu offset = %llu",
len, cmd->offset);
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return -EINVAL;
}
@ -321,6 +323,7 @@ int32_t cam_context_config_dev_to_hw(
rc = -EFAULT;
}
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
}
@ -382,6 +385,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
if ((len < sizeof(struct cam_packet)) ||
(cmd->offset >= (len - sizeof(struct cam_packet)))) {
CAM_ERR(CAM_CTXT, "Not enough buf");
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return -EINVAL;
}
@ -500,7 +504,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
req->in_map_entries[j].sync_id, rc);
}
}
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
put_ref:
for (--i; i >= 0; i--) {
@ -514,6 +518,7 @@ free_req:
req->ctx = NULL;
spin_unlock(&ctx->lock);
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
}
@ -1107,6 +1112,7 @@ static int cam_context_dump_context(struct cam_context *ctx,
if (dump_args->offset >= buf_len) {
CAM_WARN(CAM_CTXT, "dump buffer overshoot offset %zu len %zu",
dump_args->offset, buf_len);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
@ -1118,6 +1124,7 @@ static int cam_context_dump_context(struct cam_context *ctx,
if (remain_len < min_len) {
CAM_WARN(CAM_CTXT, "dump buffer exhaust remain %zu min %u",
remain_len, min_len);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
dst = (uint8_t *)cpu_addr + dump_args->offset;
@ -1142,7 +1149,8 @@ static int cam_context_dump_context(struct cam_context *ctx,
hdr->size = hdr->word_size * (addr - start);
dump_args->offset += hdr->size +
sizeof(struct cam_context_dump_header);
return rc;
cam_mem_put_cpu_buf(dump_args->buf_handle);
return 0;
}
int32_t cam_context_dump_dev_to_hw(struct cam_context *ctx,

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@ -1129,6 +1130,7 @@ static int cam_custom_mgr_prepare_hw_update(void *hw_mgr_priv,
}
cam_custom_add_io_buffers(hw_mgr->img_iommu_hdl, prepare);
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return 0;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -543,6 +543,33 @@ static int cam_fd_mgr_util_get_buf_map_requirement(uint32_t direction,
return 0;
}
static int cam_fd_mgr_put_cpu_buf(struct cam_hw_prepare_update_args *prepare)
{
int i, rc;
uint32_t plane;
bool need_io_map, need_cpu_map;
struct cam_buf_io_cfg *io_cfg;
io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
&prepare->packet->payload + prepare->packet->io_configs_offset);
if (!io_cfg)
return -EINVAL;
for (i = 0; i < prepare->packet->num_io_configs; i++) {
rc = cam_fd_mgr_util_get_buf_map_requirement(
io_cfg[i].direction, io_cfg[i].resource_type,
&need_io_map, &need_cpu_map);
if (rc || !need_cpu_map)
continue;
for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++)
cam_mem_put_cpu_buf(io_cfg[i].mem_handle[plane]);
}
return 0;
}
static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
struct cam_fd_hw_io_buffer *input_buf,
@ -639,6 +666,8 @@ static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
"Invalid cpu buf %d %d %d",
io_cfg[i].direction,
io_cfg[i].resource_type, plane);
cam_mem_put_cpu_buf(
io_cfg[i].mem_handle[plane]);
rc = -EINVAL;
return rc;
}
@ -1618,6 +1647,7 @@ hw_dump:
if (fd_dump_args.buf_len <= dump_args->offset) {
CAM_WARN(CAM_FD, "dump offset overshoot len %zu offset %zu",
fd_dump_args.buf_len, dump_args->offset);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
remain_len = fd_dump_args.buf_len - dump_args->offset;
@ -1627,6 +1657,7 @@ hw_dump:
if (remain_len < min_len) {
CAM_WARN(CAM_FD, "dump buffer exhaust remain %zu min %u",
remain_len, min_len);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
@ -1658,12 +1689,14 @@ hw_dump:
if (rc) {
CAM_ERR(CAM_FD, "Hw Dump cmd fails req %lld rc %d",
frame_req->request_id, rc);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}
}
CAM_DBG(CAM_FD, "Offset before %zu after %zu",
dump_args->offset, fd_dump_args.offset);
dump_args->offset = fd_dump_args.offset;
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}
@ -1797,7 +1830,7 @@ static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
&prestart_args, &kmd_buf);
if (rc) {
CAM_ERR(CAM_FD, "Error in hw update entries %d", rc);
goto error;
goto put_cpu_buf;
}
/* get a free frame req from free list */
@ -1806,7 +1839,8 @@ static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
if (rc || !frame_req) {
CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK",
rc, hw_ctx);
return -ENOMEM;
rc = -ENOMEM;
goto put_cpu_buf;
}
/* Setup frame request info and queue to pending list */
@ -1821,9 +1855,13 @@ static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
*/
prepare->priv = frame_req;
cam_fd_mgr_put_cpu_buf(prepare);
CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id);
return 0;
put_cpu_buf:
cam_fd_mgr_put_cpu_buf(prepare);
error:
return rc;
}

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/debugfs.h>
@ -156,6 +157,7 @@ static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
CAM_ERR(CAM_CTXT,
"Invalid offset, len: %zu cmd offset: %llu sizeof packet: %zu",
len, cmd->offset, sizeof(struct cam_packet));
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return -EINVAL;
}
@ -167,6 +169,7 @@ static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
if (rc) {
CAM_ERR(CAM_CTXT, "Invalid packet params, remain length: %zu",
remain_len);
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
}
@ -181,6 +184,7 @@ static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
if (rc)
CAM_ERR(CAM_ICP, "Failed to prepare device");
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
}

View file

@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
*/
#include <linux/uaccess.h>
@ -86,7 +88,7 @@ static int cam_icp_dump_io_cfg(struct cam_icp_hw_ctx_data *ctx_data,
used = 0;
}
}
cam_mem_put_cpu_buf(buf_handle);
return rc;
}
@ -4286,6 +4288,7 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
*fw_cmd_buf_iova_addr =
(*fw_cmd_buf_iova_addr + cmd_desc[i].offset);
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
&cpu_addr, &len);
if (rc || !cpu_addr) {
@ -4302,9 +4305,12 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
((len - cmd_desc[i].offset) <
cmd_desc[i].length)) {
CAM_ERR(CAM_ICP, "Invalid offset or length");
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
return -EINVAL;
}
cpu_addr = cpu_addr + cmd_desc[i].offset;
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
}
@ -5260,6 +5266,7 @@ hw_dump:
req_ts.tv_nsec/NSEC_PER_USEC,
cur_ts.tv_sec,
cur_ts.tv_nsec/NSEC_PER_USEC);
rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
&icp_dump_args.cpu_addr, &icp_dump_args.buf_len);
if (rc) {
@ -5270,6 +5277,7 @@ hw_dump:
if (icp_dump_args.buf_len <= dump_args->offset) {
CAM_WARN(CAM_ICP, "dump buffer overshoot len %zu offset %zu",
icp_dump_args.buf_len, dump_args->offset);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
@ -5280,6 +5288,7 @@ hw_dump:
if (remain_len < min_len) {
CAM_WARN(CAM_ICP, "dump buffer exhaust remain %zu min %u",
remain_len, min_len);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
@ -5306,6 +5315,8 @@ hw_dump:
CAM_DBG(CAM_ICP, "Offset before %zu after %zu",
dump_args->offset, icp_dump_args.offset);
dump_args->offset = icp_dump_args.offset;
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}

View file

@ -334,7 +334,8 @@ static int cam_isp_ctx_dump_req(
CAM_ERR(CAM_ISP,
"Invalid offset exp %u actual %u",
req_isp->cfg[i].offset, (uint32_t)len);
return rc;
cam_mem_put_cpu_buf(req_isp->cfg[i].handle);
return -EINVAL;
}
remain_len = len - req_isp->cfg[i].offset;
@ -344,7 +345,8 @@ static int cam_isp_ctx_dump_req(
"Invalid len exp %u remain_len %u",
req_isp->cfg[i].len,
(uint32_t)remain_len);
return rc;
cam_mem_put_cpu_buf(req_isp->cfg[i].handle);
return -EINVAL;
}
buf_start = (uint32_t *)((uint8_t *) buf_addr +
@ -368,6 +370,7 @@ static int cam_isp_ctx_dump_req(
} else {
cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
}
cam_mem_put_cpu_buf(req_isp->cfg[i].handle);
}
}
return rc;
@ -3260,6 +3263,7 @@ hw_dump:
spin_unlock_bh(&ctx->lock);
CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
buf_len, dump_info->offset);
cam_mem_put_cpu_buf(dump_info->buf_handle);
return -ENOSPC;
}
@ -3271,6 +3275,7 @@ hw_dump:
spin_unlock_bh(&ctx->lock);
CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
remain_len, min_len);
cam_mem_put_cpu_buf(dump_info->buf_handle);
return -ENOSPC;
}
@ -3309,20 +3314,17 @@ hw_dump:
if (rc) {
CAM_ERR(CAM_ISP, "Dump event fail %lld",
req->request_id);
spin_unlock_bh(&ctx->lock);
return rc;
}
if (dump_only_event_record) {
spin_unlock_bh(&ctx->lock);
return rc;
goto end;
}
if (dump_only_event_record)
goto end;
rc = __cam_isp_ctx_dump_req_info(ctx, req, cpu_addr,
buf_len, &dump_info->offset);
if (rc) {
CAM_ERR(CAM_ISP, "Dump Req info fail %lld",
req->request_id);
spin_unlock_bh(&ctx->lock);
return rc;
goto end;
}
spin_unlock_bh(&ctx->lock);
@ -3336,6 +3338,12 @@ hw_dump:
&dump_args);
dump_info->offset = dump_args.offset;
}
cam_mem_put_cpu_buf(dump_info->buf_handle);
return rc;
end:
spin_unlock_bh(&ctx->lock);
cam_mem_put_cpu_buf(dump_info->buf_handle);
return rc;
}
@ -4581,6 +4589,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);
}
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
put_ref:
@ -4594,6 +4603,7 @@ free_req:
list_add_tail(&req->list, &ctx->free_req_list);
spin_unlock_bh(&ctx->lock);
cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@ -6735,6 +6735,7 @@ static int cam_ife_mgr_dump(void *hw_mgr_priv, void *args)
}
}
dump_args->offset = isp_hw_dump_args.offset;
cam_mem_put_cpu_buf(dump_args->buf_handle);
end:
CAM_DBG(CAM_ISP, "offset %u", dump_args->offset);
return rc;

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <media/cam_defs.h>
@ -117,6 +118,7 @@ static int cam_isp_update_dual_config(
(cmd_desc->offset >=
(len - sizeof(struct cam_isp_dual_config)))) {
CAM_ERR(CAM_ISP, "not enough buffer provided");
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return -EINVAL;
}
remain_len = len - cmd_desc->offset;
@ -127,6 +129,7 @@ static int cam_isp_update_dual_config(
sizeof(struct cam_isp_dual_stripe_config)) >
(remain_len - offsetof(struct cam_isp_dual_config, stripes))) {
CAM_ERR(CAM_ISP, "not enough buffer for all the dual configs");
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return -EINVAL;
}
for (i = 0; i < dual_config->num_ports; i++) {
@ -184,6 +187,7 @@ static int cam_isp_update_dual_config(
}
end:
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/uaccess.h>
@ -155,6 +155,7 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
CAM_ERR(CAM_JPEG, "Invalid offset: %u cmd buf len: %zu",
p_cfg_req->hw_cfg_args.hw_update_entries[
CAM_JPEG_PARAM].offset, cmd_buf_len);
cam_mem_put_cpu_buf(mem_hdl);
return -EINVAL;
}
@ -181,6 +182,7 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
cam_mem_put_cpu_buf(mem_hdl);
return rc;
}
@ -282,6 +284,8 @@ static int cam_jpeg_insert_cdm_change_base(
if (config_args->hw_update_entries[CAM_JPEG_CHBASE].offset >=
ch_base_len) {
CAM_ERR(CAM_JPEG, "Not enough buf");
cam_mem_put_cpu_buf(
config_args->hw_update_entries[CAM_JPEG_CHBASE].handle);
return -EINVAL;
}
CAM_DBG(CAM_JPEG, "iova %pK len %zu offset %d",
@ -312,6 +316,9 @@ static int cam_jpeg_insert_cdm_change_base(
ch_base_iova_addr += size;
*ch_base_iova_addr = 0;
cam_mem_put_cpu_buf(
config_args->hw_update_entries[CAM_JPEG_CHBASE].handle);
return rc;
}
@ -1595,6 +1602,7 @@ hw_dump:
CAM_WARN(CAM_JPEG, "dump offset overshoot len %zu offset %zu",
jpeg_dump_args.buf_len, dump_args->offset);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
@ -1605,6 +1613,7 @@ hw_dump:
CAM_WARN(CAM_JPEG, "dump buffer exhaust remain %zu min %u",
remain_len, min_len);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
cam_mem_put_cpu_buf(dump_args->buf_handle);
return -ENOSPC;
}
@ -1637,6 +1646,7 @@ hw_dump:
CAM_DBG(CAM_JPEG, "Offset before %u after %u",
dump_args->offset, jpeg_dump_args.offset);
dump_args->offset = jpeg_dump_args.offset;
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -696,6 +696,7 @@ static int cam_lrme_mgr_hw_dump(void *hw_mgr_priv, void *hw_dump_args)
CAM_DBG(CAM_LRME, "Offset before %zu after %zu",
dump_args->offset, lrme_dump_args.offset);
dump_args->offset = lrme_dump_args.offset;
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}

View file

@ -271,24 +271,33 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
return -EINVAL;
idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
return -EINVAL;
if (!tbl.bufq[idx].active)
return -EPERM;
if (buf_handle != tbl.bufq[idx].buf_handle)
if (buf_handle != tbl.bufq[idx].buf_handle) {
CAM_ERR(CAM_MEM, "idx: %d Invalid buf handle %d",
idx, buf_handle);
return -EINVAL;
}
if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)) {
CAM_ERR(CAM_MEM, "idx: %d Invalid flag 0x%x",
idx, tbl.bufq[idx].flags);
return -EINVAL;
}
if (tbl.bufq[idx].kmdvaddr) {
if (tbl.bufq[idx].kmdvaddr &&
kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
*len = tbl.bufq[idx].len;
} else {
CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
buf_handle);
CAM_ERR(CAM_MEM,
"No KMD access request, vaddr= %p, idx= %d, handle= %d",
tbl.bufq[idx].kmdvaddr, idx, buf_handle);
return -EINVAL;
}
@ -707,6 +716,8 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
sizeof(int32_t) * cmd->num_hdl);
tbl.bufq[idx].is_imported = false;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
mutex_unlock(&tbl.bufq[idx].q_lock);
cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
@ -812,6 +823,8 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
sizeof(int32_t) * cmd->num_hdl);
tbl.bufq[idx].is_imported = true;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
mutex_unlock(&tbl.bufq[idx].q_lock);
cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
@ -962,17 +975,23 @@ void cam_mem_mgr_deinit(void)
mutex_destroy(&tbl.m_lock);
}
static int cam_mem_util_unmap(int32_t idx,
enum cam_smmu_mapping_client client)
static void cam_mem_util_unmap(struct kref *kref)
{
int rc = 0;
int32_t idx;
enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
enum cam_smmu_mapping_client client;
struct cam_mem_buf_queue *bufq =
container_of(kref, typeof(*bufq), krefcount);
idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
CAM_ERR(CAM_MEM, "Incorrect index");
return -EINVAL;
return;
}
client = tbl.bufq[idx].smmu_mapping_client;
CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);
mutex_lock(&tbl.m_lock);
@ -981,7 +1000,7 @@ static int cam_mem_util_unmap(int32_t idx,
CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped,",
idx);
mutex_unlock(&tbl.m_lock);
return 0;
return;
}
if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
@ -1041,13 +1060,50 @@ static int cam_mem_util_unmap(int32_t idx,
clear_bit(idx, tbl.bitmap);
mutex_unlock(&tbl.m_lock);
return rc;
}
void cam_mem_put_cpu_buf(int32_t buf_handle)
{
int rc = 0;
int idx;
if (!buf_handle) {
CAM_ERR(CAM_MEM, "Invalid buf_handle");
return;
}
idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
CAM_ERR(CAM_MEM, "idx: %d not valid", idx);
return;
}
if (!tbl.bufq[idx].active) {
CAM_ERR(CAM_MEM, "idx: %d not active", idx);
rc = -EPERM;
return;
}
if (buf_handle != tbl.bufq[idx].buf_handle) {
CAM_ERR(CAM_MEM, "idx: %d Invalid buf handle %d",
idx, buf_handle);
rc = -EINVAL;
return;
}
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
buf_handle, idx);
}
EXPORT_SYMBOL(cam_mem_put_cpu_buf);
int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
int idx;
int rc;
int rc = 0;
if (!atomic_read(&cam_mem_mgr_state)) {
CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@ -1079,7 +1135,11 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
}
CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
cmd->buf_handle, idx);
return rc;
}
@ -1200,6 +1260,8 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
sizeof(int32_t));
tbl.bufq[idx].is_imported = false;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_KERNEL;
mutex_unlock(&tbl.bufq[idx].q_lock);
out->kva = kvaddr;
@ -1225,7 +1287,7 @@ EXPORT_SYMBOL(cam_mem_mgr_request_mem);
int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
int32_t idx;
int rc;
int rc = 0;
if (!atomic_read(&cam_mem_mgr_state)) {
CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@ -1259,7 +1321,12 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
}
CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
tbl.bufq[idx].buf_handle, idx);
else
rc = -EINVAL;
return rc;
}
@ -1348,6 +1415,8 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
sizeof(int32_t));
tbl.bufq[idx].is_imported = false;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_KERNEL;
mutex_unlock(&tbl.bufq[idx].q_lock);
out->kva = 0;
@ -1432,9 +1501,12 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
}
CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
if (rc)
CAM_ERR(CAM_MEM, "unmapping secondary heap failed");
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
inp->mem_handle, idx);
else
rc = -EINVAL;
return rc;
}
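With cam_mem_util_unmap() reworked into a kref release callback, each bufq entry's lifetime is driven by krefcount: kref_init() at alloc/map time accounts for the handle owner, cam_mem_get_cpu_buf() takes an extra reference via kref_get_unless_zero(), and cam_mem_put_cpu_buf() as well as the user/kernel release paths drop theirs via kref_put(). A simplified sketch of that flow, with table locking and error handling stripped out:

	/* allocation / import: the handle owner holds the initial reference */
	kref_init(&tbl.bufq[idx].krefcount);

	/* kernel CPU access: the get fails once teardown has already dropped to zero */
	if (kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	}

	/* end of CPU access, or buffer release from user/kernel clients */
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
		CAM_DBG(CAM_MEM, "last reference dropped, buffer unmapped");

Only when the final reference goes away does cam_mem_util_unmap() run, so a CPU mapping that is still in use can no longer be torn down underneath its user.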

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_MEM_MGR_H_
@ -28,19 +29,23 @@ enum cam_smmu_mapping_client {
/**
* struct cam_mem_buf_queue
*
* @dma_buf: pointer to the allocated dma_buf in the table
* @q_lock: mutex lock for buffer
* @hdls: list of mapped handles
* @num_hdl: number of handles
* @fd: file descriptor of buffer
* @buf_handle: unique handle for buffer
* @align: alignment for allocation
* @len: size of buffer
* @flags: attributes of buffer
* @vaddr: IOVA of buffer
* @kmdvaddr: Kernel virtual address
* @active: state of the buffer
* @is_imported: Flag indicating if buffer is imported from an FD in user space
* @dma_buf: pointer to the allocated dma_buf in the table
* @q_lock: mutex lock for buffer
* @hdls: list of mapped handles
* @num_hdl: number of handles
* @fd: file descriptor of buffer
* @buf_handle: unique handle for buffer
* @align: alignment for allocation
* @len: size of buffer
* @flags: attributes of buffer
* @vaddr: IOVA of buffer
* @kmdvaddr: Kernel virtual address
* @active: state of the buffer
* @is_imported: Flag indicating if buffer is imported from an FD in user
* space
* @krefcount: Reference counter to track whether the buffer is
* mapped and in use
* @smmu_mapping_client: Client buffer (User or kernel)
*/
struct cam_mem_buf_queue {
struct dma_buf *dma_buf;
@ -56,6 +61,8 @@ struct cam_mem_buf_queue {
uintptr_t kmdvaddr;
bool active;
bool is_imported;
struct kref krefcount;
enum cam_smmu_mapping_client smmu_mapping_client;
};
/**

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_MEM_MGR_API_H_
@ -90,6 +91,14 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr,
size_t *len);
/**
* @brief: This indicates end of CPU access
*
* @buf_handle: Handle for the buffer
*
*/
void cam_mem_put_cpu_buf(int32_t buf_handle);
static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
{
return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle);

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -2715,9 +2715,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
return -EINVAL;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(add_req->link_hdl);
link = cam_get_link_priv(add_req->link_hdl);
if (!link) {
CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
return -EINVAL;
@ -2794,8 +2792,7 @@ static int cam_req_mgr_cb_notify_err(
goto end;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(err_info->link_hdl);
link = cam_get_link_priv(err_info->link_hdl);
if (!link) {
CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
rc = -EINVAL;
@ -2882,8 +2879,7 @@ static int cam_req_mgr_cb_notify_timer(
goto end;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(timer_data->link_hdl);
link = cam_get_link_priv(timer_data->link_hdl);
if (!link) {
CAM_DBG(CAM_CRM, "link ptr NULL %x", timer_data->link_hdl);
rc = -EINVAL;
@ -2929,8 +2925,7 @@ static int cam_req_mgr_cb_notify_stop(
goto end;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(stop_info->link_hdl);
link = cam_get_link_priv(stop_info->link_hdl);
if (!link) {
CAM_DBG(CAM_CRM, "link ptr NULL %x", stop_info->link_hdl);
rc = -EINVAL;
@ -2991,8 +2986,7 @@ static int cam_req_mgr_cb_notify_trigger(
goto end;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(trigger_data->link_hdl);
link = cam_get_link_priv(trigger_data->link_hdl);
if (!link) {
CAM_DBG(CAM_CRM, "link ptr NULL %x", trigger_data->link_hdl);
rc = -EINVAL;
@ -3347,7 +3341,7 @@ static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
__cam_req_mgr_destroy_subdev(&link->l_dev);
/* Destroy the link handle */
rc = cam_destroy_device_hdl(link->link_hdl);
rc = cam_destroy_link_hdl(link->link_hdl);
if (rc < 0) {
CAM_ERR(CAM_CRM, "error destroying link hdl %x rc %d",
link->link_hdl, rc);
@ -3373,14 +3367,14 @@ int cam_req_mgr_destroy_session(
}
mutex_lock(&g_crm_core_dev->crm_lock);
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(ses_info->session_hdl);
cam_session = cam_get_session_priv(ses_info->session_hdl);
if (!cam_session ||
(cam_session->session_hdl != ses_info->session_hdl)) {
CAM_ERR(CAM_CRM, "ses:%s ses_info->ses_hdl:%x ses->ses_hdl:%x",
CAM_ERR(CAM_CRM,
"ses: %s, ses_info->ses_hdl:%x, session->ses_hdl:%x",
CAM_IS_NULL_TO_STR(cam_session), ses_info->session_hdl,
(!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL :
cam_session->session_hdl);
cam_session->session_hdl);
rc = -ENOENT;
goto end;
@ -3443,15 +3437,16 @@ int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info)
mutex_lock(&g_crm_core_dev->crm_lock);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(link_info->u.link_info_v1.session_hdl);
cam_session =
cam_get_session_priv(link_info->u.link_info_v1.session_hdl);
if (!cam_session || (cam_session->session_hdl !=
link_info->u.link_info_v1.session_hdl)) {
CAM_ERR(CAM_CRM, "ses:%s link_info->ses_hdl:%x ses->ses_hdl:%x",
link_info->u.link_info_v1.session_hdl)) {
CAM_ERR(CAM_CRM,
"ses: %s, link_info->ses_hdl:%x, session->ses_hdl:%x",
CAM_IS_NULL_TO_STR(cam_session),
link_info->u.link_info_v1.session_hdl,
(!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL :
cam_session->session_hdl);
cam_session->session_hdl);
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@ -3470,8 +3465,8 @@ int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info)
root_dev.priv = (void *)link;
mutex_lock(&link->lock);
/* Create unique dev handle for link */
link->link_hdl = cam_create_device_hdl(&root_dev);
/* Create unique handle for link */
link->link_hdl = cam_create_link_hdl(&root_dev);
if (link->link_hdl < 0) {
CAM_ERR(CAM_CRM,
"Insufficient memory to create new device handle");
@ -3526,7 +3521,7 @@ int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info)
setup_failed:
__cam_req_mgr_destroy_subdev(&link->l_dev);
create_subdev_failed:
cam_destroy_device_hdl(link->link_hdl);
cam_destroy_link_hdl(link->link_hdl);
link_info->u.link_info_v1.link_hdl = -1;
link_hdl_fail:
mutex_unlock(&link->lock);
@ -3558,15 +3553,16 @@ int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info)
mutex_lock(&g_crm_core_dev->crm_lock);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(link_info->u.link_info_v2.session_hdl);
cam_session =
cam_get_session_priv(link_info->u.link_info_v2.session_hdl);
if (!cam_session || (cam_session->session_hdl !=
link_info->u.link_info_v2.session_hdl)) {
CAM_ERR(CAM_CRM, "ses:%s link_info->ses_hdl:%x ses->ses_hdl:%x",
link_info->u.link_info_v2.session_hdl)) {
CAM_ERR(CAM_CRM,
"ses: %s, link_info->ses_hdl:%x, session->ses_hdl:%x",
CAM_IS_NULL_TO_STR(cam_session),
link_info->u.link_info_v2.session_hdl,
(!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL :
cam_session->session_hdl);
cam_session->session_hdl);
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@ -3585,8 +3581,8 @@ int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info)
root_dev.priv = (void *)link;
mutex_lock(&link->lock);
/* Create unique dev handle for link */
link->link_hdl = cam_create_device_hdl(&root_dev);
/* Create unique handle for link */
link->link_hdl = cam_create_link_hdl(&root_dev);
if (link->link_hdl < 0) {
CAM_ERR(CAM_CRM,
"Insufficient memory to create new device handle");
@ -3641,7 +3637,7 @@ int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info)
setup_failed:
__cam_req_mgr_destroy_subdev(&link->l_dev);
create_subdev_failed:
cam_destroy_device_hdl(link->link_hdl);
cam_destroy_link_hdl(link->link_hdl);
link_info->u.link_info_v2.link_hdl = -1;
link_hdl_fail:
mutex_unlock(&link->lock);
@ -3666,21 +3662,21 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(unlink_info->session_hdl);
cam_session = cam_get_session_priv(unlink_info->session_hdl);
if (!cam_session ||
(cam_session->session_hdl != unlink_info->session_hdl)) {
CAM_ERR(CAM_CRM, "ses:%s unlink->ses_hdl:%x ses->ses_hdl:%x",
CAM_ERR(CAM_CRM,
"ses: %s, unlink_info->ses_hdl:%x, cam_ses->ses_hdl:%x",
CAM_IS_NULL_TO_STR(cam_session),
unlink_info->session_hdl,
(!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL :
cam_session->session_hdl);
cam_session->session_hdl);
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
/* link hdl's priv data is core_link struct */
link = cam_get_device_priv(unlink_info->link_hdl);
link = cam_get_link_priv(unlink_info->link_hdl);
if (!link || (link->link_hdl != unlink_info->link_hdl)) {
CAM_ERR(CAM_CRM, "link:%s unlink->lnk_hdl:%x link->lnk_hdl:%x",
CAM_IS_NULL_TO_STR(link), unlink_info->link_hdl,
@ -3714,8 +3710,7 @@ int cam_req_mgr_schedule_request(
}
mutex_lock(&g_crm_core_dev->crm_lock);
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(sched_req->link_hdl);
link = cam_get_link_priv(sched_req->link_hdl);
if (!link || (link->link_hdl != sched_req->link_hdl)) {
CAM_ERR(CAM_CRM, "lnk:%s schd_req->lnk_hdl:%x lnk->lnk_hdl:%x",
CAM_IS_NULL_TO_STR(link), sched_req->link_hdl,
@ -3828,14 +3823,15 @@ int cam_req_mgr_sync_config(
mutex_lock(&g_crm_core_dev->crm_lock);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(sync_info->session_hdl);
cam_session = cam_get_session_priv(sync_info->session_hdl);
if (!cam_session ||
(cam_session->session_hdl != sync_info->session_hdl)) {
CAM_ERR(CAM_CRM, "ses:%s sync_info->ses_hdl:%x ses->ses_hdl:%x",
CAM_IS_NULL_TO_STR(cam_session), sync_info->session_hdl,
(!cam_session) ?
CAM_REQ_MGR_DEFAULT_HDL_VAL : cam_session->session_hdl);
CAM_ERR(CAM_CRM,
"ses: %s, sync_info->session_hdl:%x, ses->ses_hdl:%x",
CAM_IS_NULL_TO_STR(cam_session),
sync_info->session_hdl,
(!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL :
cam_session->session_hdl);
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@ -3846,7 +3842,7 @@ int cam_req_mgr_sync_config(
sync_info->link_hdls[0], sync_info->link_hdls[1]);
/* only two links existing per session in dual cam use case*/
link1 = cam_get_device_priv(sync_info->link_hdls[0]);
link1 = cam_get_link_priv(sync_info->link_hdls[0]);
if (!link1 || (link1->link_hdl != sync_info->link_hdls[0])) {
CAM_ERR(CAM_CRM, "lnk:%s sync_info->lnk_hdl[0]:%x lnk1_hdl:%x",
CAM_IS_NULL_TO_STR(link1), sync_info->link_hdls[0],
@ -3856,7 +3852,7 @@ int cam_req_mgr_sync_config(
goto done;
}
link2 = cam_get_device_priv(sync_info->link_hdls[1]);
link2 = cam_get_link_priv(sync_info->link_hdls[1]);
if (!link2 || (link2->link_hdl != sync_info->link_hdls[1])) {
CAM_ERR(CAM_CRM, "lnk:%s sync_info->lnk_hdl[1]:%x lnk2_hdl:%x",
CAM_IS_NULL_TO_STR(link2), sync_info->link_hdls[1],
@ -3928,9 +3924,9 @@ int cam_req_mgr_flush_requests(
}
mutex_lock(&g_crm_core_dev->crm_lock);
/* session hdl's priv data is cam session struct */
session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(flush_info->session_hdl);
session = cam_get_session_priv(flush_info->session_hdl);
if (!session || (session->session_hdl != flush_info->session_hdl)) {
CAM_ERR(CAM_CRM, "ses: %s flush->ses_hdl:%x ses->ses_hdl:%x",
CAM_IS_NULL_TO_STR(session), flush_info->session_hdl,
@ -3945,8 +3941,7 @@ int cam_req_mgr_flush_requests(
goto end;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(flush_info->link_hdl);
link = cam_get_link_priv(flush_info->link_hdl);
if (!link || (link->link_hdl != flush_info->link_hdl)) {
CAM_ERR(CAM_CRM, "link:%s flush->link_hdl:%x link->link_hdl:%x",
CAM_IS_NULL_TO_STR(link), flush_info->link_hdl,
@ -4006,8 +4001,7 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
mutex_lock(&g_crm_core_dev->crm_lock);
for (i = 0; i < control->num_links; i++) {
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(control->link_hdls[i]);
link = cam_get_link_priv(control->link_hdls[i]);
if (!link || (link->link_hdl != control->link_hdls[i])) {
CAM_ERR(CAM_CRM,
"link:%s control->lnk_hdl:%x link->lnk_hdl:%x",
@ -4090,8 +4084,7 @@ int cam_req_mgr_dump_request(struct cam_dump_req_cmd *dump_req)
mutex_lock(&g_crm_core_dev->crm_lock);
/* session hdl's priv data is cam session struct */
session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(dump_req->session_handle);
session = cam_get_session_priv(dump_req->session_handle);
if (!session || (session->session_hdl != dump_req->session_handle)) {
CAM_ERR(CAM_CRM, "ses:%s dump_req->ses_hdl:%x ses->ses_hdl:%x",
CAM_IS_NULL_TO_STR(session), dump_req->session_handle,
@ -4106,8 +4099,7 @@ int cam_req_mgr_dump_request(struct cam_dump_req_cmd *dump_req)
goto end;
}
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(dump_req->link_hdl);
link = cam_get_link_priv(dump_req->link_hdl);
if (!link || (link->link_hdl != dump_req->link_hdl)) {
CAM_ERR(CAM_CRM, "link:%s dump->link_hdl:%x link->link_hdl:%x",
CAM_IS_NULL_TO_STR(link), dump_req->link_hdl,

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "CAM-REQ-MGR_UTIL %s:%d " fmt, __func__, __LINE__
@ -36,28 +37,29 @@ int cam_req_mgr_util_init(void)
rc = -ENOMEM;
goto hdl_tbl_alloc_failed;
}
bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES_V2) * sizeof(long);
hdl_tbl_local->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!hdl_tbl_local->bitmap) {
rc = -ENOMEM;
goto bitmap_alloc_fail;
}
hdl_tbl_local->bits = bitmap_size * BITS_PER_BYTE;
spin_lock_bh(&hdl_tbl_lock);
if (hdl_tbl) {
spin_unlock_bh(&hdl_tbl_lock);
rc = -EEXIST;
kfree(hdl_tbl_local->bitmap);
kfree(hdl_tbl_local);
goto hdl_tbl_check_failed;
}
hdl_tbl = hdl_tbl_local;
spin_unlock_bh(&hdl_tbl_lock);
bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES_V2) * sizeof(long);
hdl_tbl->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!hdl_tbl->bitmap) {
rc = -ENOMEM;
goto bitmap_alloc_fail;
}
hdl_tbl->bits = bitmap_size * BITS_PER_BYTE;
return rc;
bitmap_alloc_fail:
kfree(hdl_tbl);
kfree(hdl_tbl_local);
hdl_tbl = NULL;
hdl_tbl_alloc_failed:
hdl_tbl_check_failed:
@ -122,6 +124,19 @@ static int32_t cam_get_free_handle_index(void)
return idx;
}
static void cam_dump_tbl_info(void)
{
int i;
for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES_V2; i++)
CAM_INFO_RATE_LIMIT_CUSTOM(CAM_CRM,
CAM_RATE_LIMIT_INTERVAL_5SEC,
CAM_REQ_MGR_MAX_HANDLES_V2,
"session_hdl=%x hdl_value=%x type=%d state=%d",
hdl_tbl->hdl[i].session_hdl, hdl_tbl->hdl[i].hdl_value,
hdl_tbl->hdl[i].type, hdl_tbl->hdl[i].state);
}
int32_t cam_create_session_hdl(void *priv)
{
int idx;
@ -137,7 +152,9 @@ int32_t cam_create_session_hdl(void *priv)
idx = cam_get_free_handle_index();
if (idx < 0) {
CAM_ERR(CAM_CRM, "Unable to create session handle");
CAM_ERR(CAM_CRM, "Unable to create session handle(idx = %d)",
idx);
cam_dump_tbl_info();
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@ -178,7 +195,9 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
idx = cam_get_free_handle_index();
if (idx < 0) {
CAM_ERR(CAM_CRM, "Unable to create device handle");
CAM_ERR(CAM_CRM, "Unable to create device handle(idx= %d)",
idx);
cam_dump_tbl_info();
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@ -197,7 +216,42 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
return handle;
}
void *cam_get_device_priv(int32_t dev_hdl)
int32_t cam_create_link_hdl(struct cam_create_dev_hdl *hdl_data)
{
int idx;
int rand = 0;
int32_t handle;
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
CAM_ERR(CAM_CRM, "Unable to create link handle(idx = %d)", idx);
cam_dump_tbl_info();
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES);
handle = GET_DEV_HANDLE(rand, HDL_TYPE_LINK, idx);
hdl_tbl->hdl[idx].session_hdl = hdl_data->session_hdl;
hdl_tbl->hdl[idx].hdl_value = handle;
hdl_tbl->hdl[idx].type = HDL_TYPE_LINK;
hdl_tbl->hdl[idx].state = HDL_ACTIVE;
hdl_tbl->hdl[idx].priv = hdl_data->priv;
hdl_tbl->hdl[idx].ops = NULL;
spin_unlock_bh(&hdl_tbl_lock);
CAM_DBG(CAM_CRM, "handle = %x", handle);
return handle;
}
void *cam_get_priv(int32_t dev_hdl, int handle_type)
{
int idx;
int type;
@ -211,18 +265,19 @@ void *cam_get_device_priv(int32_t dev_hdl)
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2) {
CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx");
CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx: %d", idx);
goto device_priv_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state");
CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state: %d",
hdl_tbl->hdl[idx].state);
goto device_priv_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type");
if (type != handle_type) {
CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type:%d", type);
goto device_priv_fail;
}
@ -241,6 +296,34 @@ device_priv_fail:
return NULL;
}
void *cam_get_device_priv(int32_t dev_hdl)
{
void *priv;
priv = cam_get_priv(dev_hdl, HDL_TYPE_DEV);
return priv;
}
struct cam_req_mgr_core_session *cam_get_session_priv(int32_t dev_hdl)
{
struct cam_req_mgr_core_session *priv;
priv = (struct cam_req_mgr_core_session *)
cam_get_priv(dev_hdl, HDL_TYPE_SESSION);
return priv;
}
struct cam_req_mgr_core_link *cam_get_link_priv(int32_t dev_hdl)
{
struct cam_req_mgr_core_link *priv;
priv = (struct cam_req_mgr_core_link *)
cam_get_priv(dev_hdl, HDL_TYPE_LINK);
return priv;
}
void *cam_get_device_ops(int32_t dev_hdl)
{
int idx;
@ -265,7 +348,8 @@ void *cam_get_device_ops(int32_t dev_hdl)
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
if (type != HDL_TYPE_DEV && type != HDL_TYPE_SESSION &&
type != HDL_TYPE_LINK) {
CAM_ERR(CAM_CRM, "Invalid type");
goto device_ops_fail;
}
@ -336,6 +420,12 @@ int cam_destroy_device_hdl(int32_t dev_hdl)
return cam_destroy_hdl(dev_hdl, HDL_TYPE_DEV);
}
int cam_destroy_link_hdl(int32_t dev_hdl)
{
CAM_DBG(CAM_CRM, "handle = %x", dev_hdl);
return cam_destroy_hdl(dev_hdl, HDL_TYPE_LINK);
}
int cam_destroy_session_hdl(int32_t dev_hdl)
{
return cam_destroy_hdl(dev_hdl, HDL_TYPE_SESSION);
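These typed helpers narrow cam_get_priv() by handle type, so a session handle can no longer be dereferenced as a link and vice versa; HDL_TYPE_LINK also gets its own create/destroy pair. The callers updated in cam_req_mgr_core.c combine the typed getter with a handle-value check, roughly as follows (the error message wording here is illustrative):

	struct cam_req_mgr_core_link *link;

	link = cam_get_link_priv(link_hdl);	/* NULL on index/state/type mismatch */
	if (!link || (link->link_hdl != link_hdl)) {	/* reject stale handles */
		CAM_ERR(CAM_CRM, "invalid link hdl %x", link_hdl);
		return -EINVAL;
	}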

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_REQ_MGR_UTIL_API_H_
@ -9,6 +10,9 @@
#include <media/cam_req_mgr.h>
#include "cam_req_mgr_util_priv.h"
/* Interval for cam_info_rate_limit_custom() */
#define CAM_RATE_LIMIT_INTERVAL_5SEC 5
/**
* state of a handle(session/device)
* @HDL_FREE: free handle
@ -21,12 +25,14 @@ enum hdl_state {
/**
* handle type
* @HDL_TYPE_DEV: for device and link
* @HDL_TYPE_DEV: for device
* @HDL_TYPE_SESSION: for session
* @HDL_TYPE_LINK: for link
*/
enum hdl_type {
HDL_TYPE_DEV = 1,
HDL_TYPE_SESSION
HDL_TYPE_SESSION,
HDL_TYPE_LINK
};
/**
@ -99,8 +105,19 @@ int32_t cam_create_session_hdl(void *priv);
int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data);
/**
* cam_get_device_priv() - get private data of a handle
* @dev_hdl: handle for a session/link/device
* cam_create_link_hdl() - create a link handle
* @hdl_data: session hdl, flags, ops and priv dara as input
*
* cam_req_mgr_core calls this function to get
* session and link handles
* KMD drivers calls this function to create
* a link handle. Returns a unique link handle
*/
int32_t cam_create_link_hdl(struct cam_create_dev_hdl *hdl_data);
/**
* cam_get_device_priv() - get private data of a device handle
* @dev_hdl: handle for a device
*
* cam_req_mgr_core and KMD drivers use this function to
* get private data of a handle. Returns a private data
@ -108,6 +125,26 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data);
*/
void *cam_get_device_priv(int32_t dev_hdl);
/**
* cam_get_session_priv() - get private data of a session handle
* @dev_hdl: handle for a session
*
* cam_req_mgr_core and KMD drivers use this function to
* get private data of a handle. Returns a private data
* structure pointer.
*/
struct cam_req_mgr_core_session *cam_get_session_priv(int32_t dev_hdl);
/**
* cam_get_link_priv() - get private data of a link handle
* @dev_hdl: handle for a link
*
* cam_req_mgr_core and KMD drivers use this function to
* get private data of a handle. Returns a private data
* structure pointer.
*/
struct cam_req_mgr_core_link *cam_get_link_priv(int32_t dev_hdl);
/**
* cam_get_device_ops() - get ops of a handle
* @dev_hdl: handle for a session/link/device
@ -119,12 +156,20 @@ void *cam_get_device_ops(int32_t dev_hdl);
/**
* cam_destroy_device_hdl() - destroy device handle
* @dev_hdl: handle for a link/device.
* @dev_hdl: handle for a device.
*
* Returns success/failure
*/
int32_t cam_destroy_device_hdl(int32_t dev_hdl);
/**
* cam_destroy_link_hdl() - destroy link handle
* @dev_hdl: handle for a link.
*
* Returns success/failure
*/
int32_t cam_destroy_link_hdl(int32_t dev_hdl);
/**
* cam_destroy_session_hdl() - destroy device handle
* @dev_hdl: handle for a session

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -564,6 +564,7 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
}
break;
}
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) {
@ -733,6 +734,7 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
}
end:
cam_mem_put_cpu_buf(config.packet_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -86,23 +86,31 @@ static int32_t cam_cci_validate_queue(struct cci_device *cci_dev,
struct cam_hw_soc_info *soc_info =
&cci_dev->soc_info;
void __iomem *base = soc_info->reg_map[0].mem_base;
struct cam_cci_i2c_queue_info *queue_info =
&cci_dev->cci_i2c_queue_info[master][queue];
unsigned long flags;
read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d",
read_val, len,
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
queue_info->max_queue_size);
if ((read_val + len + 1) >
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) {
queue_info->max_queue_size) {
uint32_t reg_val = 0;
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
uint32_t report_id = queue_info->report_id;
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8) |
(1 << 9) | (report_id << 4);
CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD");
cam_io_w_mb(report_val,
base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
queue_info->report_id++;
if (queue_info->report_id == REPORT_IDSIZE)
queue_info->report_id = 0;
CAM_DBG(CAM_CCI,
"CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d",
read_val, queue);
@ -175,13 +183,37 @@ static int32_t cam_cci_lock_queue(struct cci_device *cci_dev,
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue, uint32_t en)
{
uint32_t val;
int32_t rc = 0;
uint32_t val = 0;
uint32_t read_val = 0;
struct cam_hw_soc_info *soc_info =
&cci_dev->soc_info;
void __iomem *base =
soc_info->reg_map[0].mem_base;
uint32_t reg_offset =
master * 0x200 + queue * 0x100;
if (queue != PRIORITY_QUEUE)
return 0;
goto end;
val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD;
return cam_cci_write_i2c_queue(cci_dev, val, master, queue);
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc) {
CAM_ERR(CAM_CCI,
"CCI%d_I2C_M%d_Q%d Failed to write i2c data:0x%x rc:%d",
cci_dev->soc_info.index, master, queue, val, rc);
goto end;
}
read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
end:
return rc;
}
#ifdef DUMP_CCI_REGISTERS
@ -287,14 +319,23 @@ static void cam_cci_load_report_cmd(struct cci_device *cci_dev,
uint32_t reg_offset = master * 0x200 + queue * 0x100;
uint32_t read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
struct cam_cci_i2c_queue_info *queue_info =
&cci_dev->cci_i2c_queue_info[master][queue];
uint32_t report_id = queue_info->report_id;
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8) |
(1 << 9) | (report_id << 4);
CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d", read_val);
CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d report_id %d",
read_val, report_id);
cam_io_w_mb(report_val,
base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
queue_info->report_id++;
if (queue_info->report_id == REPORT_IDSIZE)
queue_info->report_id = 0;
CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d", read_val);
cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
@ -311,7 +352,6 @@ static int32_t cam_cci_wait_report_cmd(struct cci_device *cci_dev,
uint32_t reg_val = 1 << ((master * 2) + queue);
cam_cci_load_report_cmd(cci_dev, master, queue);
spin_lock_irqsave(
&cci_dev->cci_master_info[master].lock_q[queue], flags);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
@ -336,11 +376,13 @@ static int32_t cam_cci_transfer_end(struct cci_device *cci_dev,
if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
spin_unlock_irqrestore(
&cci_dev->cci_master_info[master].lock_q[queue], flags);
cam_cci_load_report_cmd(cci_dev, master, queue);
rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
CAM_ERR(CAM_CCI, "failed rc %d", rc);
@ -357,11 +399,13 @@ static int32_t cam_cci_transfer_end(struct cci_device *cci_dev,
CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
cam_cci_load_report_cmd(cci_dev, master, queue);
rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
CAM_ERR(CAM_CCI, "Failed rc %d", rc);
@ -436,6 +480,9 @@ static int32_t cam_cci_process_full_q(struct cci_device *cci_dev,
} else {
spin_unlock_irqrestore(
&cci_dev->cci_master_info[master].lock_q[queue], flags);
CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d is set to 0",
cci_dev->soc_info.index, master, queue);
cam_cci_load_report_cmd(cci_dev, master, queue);
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
CAM_ERR(CAM_CCI, "failed rc %d", rc);
@ -699,6 +746,14 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
cci_dev->cci_wait_sync_cfg.csid *
CCI_SET_CID_SYNC_TIMER_OFFSET);
rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
if (rc < 0) {
CAM_ERR(CAM_CCI,
"CCI%d_I2C_M%d_Q%d Failed to lock_queue for rc: %d",
cci_dev->soc_info.index, master, queue, rc);
return rc;
}
val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
c_ctrl->cci_info->retries << 16 |
c_ctrl->cci_info->id_map << 18;
@ -733,12 +788,6 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
reg_offset);
}
rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
if (rc < 0) {
CAM_ERR(CAM_CCI, "failed line %d", rc);
return rc;
}
while (cmd_size) {
uint32_t pack = 0;
@ -753,9 +802,9 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
CAM_DBG(CAM_CCI, "CUR_WORD_CNT_ADDR %d len %d max %d",
read_val, len, max_queue_size);
/* + 1 - space alocation for Report CMD */
if ((read_val + len + 1) > queue_size) {
if ((read_val + len + 1) > max_queue_size) {
/* + 2 - space alocation for Report and Unlock CMD */
if ((read_val + len + 2) > queue_size) {
if ((read_val + len + 2) > max_queue_size) {
rc = cam_cci_process_full_q(cci_dev,
master, queue);
if (rc < 0) {
@ -967,19 +1016,22 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
goto rel_mutex_q;
}
CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
c_ctrl->cci_info->id_map);
val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
c_ctrl->cci_info->retries << 16 |
c_ctrl->cci_info->id_map << 18;
val = CCI_I2C_LOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
CAM_DBG(CAM_CCI, "failed rc: %d", rc);
CAM_DBG(CAM_CCI,
"CCI%d_I2C_M%d_Q%d failed to write lock_cmd for rc: %d",
cci_dev->soc_info.index, master, queue, rc);
goto rel_mutex_q;
}
val = CCI_I2C_LOCK_CMD;
CAM_DBG(CAM_CCI,
"CCI%d_I2C_M%d_Q%d set param sid 0x%x retries %d id_map %d",
cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->sid,
c_ctrl->cci_info->retries, c_ctrl->cci_info->id_map);
val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
c_ctrl->cci_info->retries << 16 |
c_ctrl->cci_info->id_map << 18;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
CAM_DBG(CAM_CCI, "failed rc: %d", rc);
@ -1204,7 +1256,11 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
master, queue);
if (rc < 0) {
CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_CMD_ADDR +
master * 0x200 + queue * 0x100);
CAM_ERR(CAM_CCI,
"CCI%d_I2C_M%d_Q%d Initial validataion failed rc: %d, CUR_CMD:0x%x",
cci_dev->soc_info.index, master, queue, rc, val);
goto rel_mutex_q;
}
@ -1218,6 +1274,16 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
goto rel_mutex_q;
}
val = CCI_I2C_LOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
CAM_DBG(CAM_CCI,
"CCI%d_I2C_M%d_Q%d failed rc: %d",
cci_dev->soc_info.index, master,
queue, rc);
goto rel_mutex_q;
}
CAM_DBG(CAM_CCI, "master %d, queue %d", master, queue);
CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
@ -1231,13 +1297,6 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
goto rel_mutex_q;
}
val = CCI_I2C_LOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex_q;
}
if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
read_cfg->addr_type);
@ -1641,6 +1700,9 @@ static int32_t cam_cci_write(struct v4l2_subdev *sd,
cci_master_info = &cci_dev->cci_master_info[master];
CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d ctrl_cmd = %d",
cci_dev->soc_info.index, master, c_ctrl->cmd);
switch (c_ctrl->cmd) {
case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]);
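Each report command queued in the hunks above now carries a rolling 4-bit report id (wrapping at REPORT_IDSIZE, i.e. 16) in bits 7:4, presumably so a completion can be traced back to the specific queue entry that raised it; the hardware meaning of bits 8 and 9 is not spelled out in this diff. Both cam_cci_validate_queue() and cam_cci_load_report_cmd() build the word the same way:

	report_id  = queue_info->report_id;	/* 0 .. REPORT_IDSIZE - 1 */
	report_val = CCI_I2C_REPORT_CMD | (1 << 8) | (1 << 9) | (report_id << 4);

	cam_io_w_mb(report_val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + reg_offset);

	queue_info->report_id++;		/* wrap back to 0 after 15 */
	if (queue_info->report_id == REPORT_IDSIZE)
		queue_info->report_id = 0;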

View file

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_CCI_DEV_H_
@ -67,6 +67,8 @@
#define PRIORITY_QUEUE (QUEUE_0)
#define SYNC_QUEUE (QUEUE_1)
#define REPORT_IDSIZE 16
enum cci_i2c_sync {
MSM_SYNC_DISABLE,
MSM_SYNC_ENABLE,

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -363,6 +364,8 @@ int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
csiphy_dev->csiphy_info[index].data_rate,
csiphy_dev->csiphy_info[index].mipi_flags);
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
cam_mem_put_cpu_buf(cfg_dev->packet_handle);
return rc;
reset_settings:
@ -374,7 +377,8 @@ reset_settings:
csiphy_dev->csiphy_info[index].mipi_flags = 0;
csiphy_dev->csiphy_info[index].secure_mode = 0;
csiphy_dev->csiphy_info[index].hdl_data.device_hdl = -1;
cam_mem_put_cpu_buf(cfg_dev->packet_handle);
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -891,9 +891,12 @@ static int32_t cam_eeprom_parse_write_memory_packet(
break;
}
}
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
return rc;
end:
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
return rc;
}
@ -1050,9 +1053,12 @@ static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
}
}
e_ctrl->cal_data.num_map = num_map + 1;
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
return rc;
end:
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
return rc;
}
@ -1122,6 +1128,7 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
e_ctrl->cal_data.num_data);
memcpy(read_buffer, e_ctrl->cal_data.mapdata,
e_ctrl->cal_data.num_data);
cam_mem_put_cpu_buf(io_cfg->mem_handle[0]);
} else {
CAM_ERR(CAM_EEPROM, "Invalid direction");
rc = -EINVAL;
@ -1370,6 +1377,7 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
break;
}
cam_mem_put_cpu_buf(dev_config.packet_handle);
return rc;
power_down:
cam_eeprom_power_down(e_ctrl);
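The EEPROM parser hunks above show the pattern this series applies across the sensor drivers: a command buffer mapped for one loop iteration is released again at the bottom of that iteration, and the early-exit path releases whichever buffer was current when parsing failed. A minimal stand-alone sketch of that shape, with buf_get()/buf_put() standing in for the driver's cam_mem_get_cpu_buf()/cam_mem_put_cpu_buf() and the failure condition invented for illustration:

#include <stdio.h>

/* Stand-ins for cam_mem_get_cpu_buf()/cam_mem_put_cpu_buf(). */
static int buf_get(int handle) { printf("get %d\n", handle); return 0; }
static void buf_put(int handle) { printf("put %d\n", handle); }

static int parse_cmd_buffers(const int *handles, int count)
{
    int rc = 0, i;

    for (i = 0; i < count; i++) {
        rc = buf_get(handles[i]);
        if (rc)
            return rc;          /* nothing mapped yet, nothing to release */
        if (handles[i] < 0) {   /* invented stand-in for a parse failure */
            rc = -1;
            goto end;           /* release the current buffer before bailing out */
        }
        buf_put(handles[i]);    /* release at the end of every iteration */
    }
    return rc;
end:
    buf_put(handles[i]);        /* error path still drops the reference */
    return rc;
}

int main(void)
{
    int handles[3] = { 1, 2, 3 };

    return parse_cmd_buffers(handles, 3);
}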

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -1214,6 +1215,7 @@ int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
break;
}
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
power_info = &fctrl->power_info;
if (!power_info) {
@ -1363,6 +1365,7 @@ update_req_mgr:
fctrl->bridge_intf.crm_cb->add_req(&add_req);
CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
}
cam_mem_put_cpu_buf(config.packet_handle);
return rc;
}
@ -1547,6 +1550,8 @@ int cam_flash_pmic_gpio_pkt_parser(
rc = -EINVAL;
return rc;
}
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
break;
}
case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
@ -1643,6 +1648,8 @@ int cam_flash_pmic_gpio_pkt_parser(
rc = -EINVAL;
return rc;
}
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
break;
}
case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
@ -1792,6 +1799,7 @@ int cam_flash_pmic_gpio_pkt_parser(
return rc;
}
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
break;
}
case CAM_PKT_NOP_OPCODE: {
@ -1840,6 +1848,7 @@ int cam_flash_pmic_gpio_pkt_parser(
CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
}
cam_mem_put_cpu_buf(config.packet_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -571,6 +571,7 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
}
break;
}
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
@ -729,6 +730,7 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
(csl_packet->header.op_code & 0xFFFFFF));
return -EINVAL;
}
cam_mem_put_cpu_buf(dev_config.packet_handle);
if (!rc)
return rc;

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -300,6 +300,7 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
}
end:
cam_mem_put_cpu_buf(config.packet_handle);
return rc;
}
@ -657,9 +658,11 @@ int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
"Failed to parse the command Buffer Header");
goto end;
}
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
end:
cam_mem_put_cpu_buf(handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@ -275,6 +275,7 @@ static int32_t cam_sensor_get_io_buffer(
io_cfg->direction);
rc = -EINVAL;
}
cam_mem_put_cpu_buf(io_cfg->mem_handle[0]);
return rc;
}
@ -709,9 +710,12 @@ int cam_sensor_i2c_command_parser(
}
}
i2c_reg_settings->is_settings_valid = 1;
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
}
return rc;
end:
cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
return rc;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@ -141,14 +141,16 @@ int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
((size_t)cmd_desc->size > (len - (size_t)cmd_desc->offset))) {
CAM_ERR(CAM_UTIL, "invalid memory len:%zd and cmd desc size:%d",
len, cmd_desc->size);
return -EINVAL;
rc = -EINVAL;
goto rel_kmd_buf;
}
remain_len -= (size_t)cmd_desc->offset;
if ((size_t)packet->kmd_cmd_buf_offset >= remain_len) {
CAM_ERR(CAM_UTIL, "Invalid kmd cmd buf offset: %zu",
(size_t)packet->kmd_cmd_buf_offset);
return -EINVAL;
rc = -EINVAL;
goto rel_kmd_buf;
}
cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4);
@ -165,6 +167,8 @@ int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
kmd_buf->size = cmd_desc->size - cmd_desc->length;
kmd_buf->used_bytes = 0;
rel_kmd_buf:
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return rc;
}
@ -218,6 +222,8 @@ void cam_packet_dump_patch_info(struct cam_packet *packet,
if (!(*dst_cpu_addr))
CAM_ERR(CAM_ICP, "Null at dst addr %p", dst_cpu_addr);
cam_mem_put_cpu_buf(patch_desc[i].dst_buf_hdl);
}
}
@ -279,6 +285,7 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
(size_t)patch_desc[i].dst_offset)) {
CAM_ERR(CAM_UTIL,
"Invalid dst buf patch offset");
cam_mem_put_cpu_buf((int32_t)patch_desc[i].dst_buf_hdl);
return -EINVAL;
}
@ -292,6 +299,7 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
"patch is done for dst %pK with src %pK value %llx",
dst_cpu_addr, src_buf_iova_addr,
*((uint64_t *)dst_cpu_addr));
cam_mem_put_cpu_buf((int32_t)patch_desc[i].dst_buf_hdl);
}
return rc;
@ -332,14 +340,16 @@ int cam_packet_util_process_generic_cmd_buffer(
((size_t)cmd_buf->offset > (buf_size - sizeof(uint32_t)))) {
CAM_ERR(CAM_UTIL, "Invalid offset for cmd buf: %zu",
(size_t)cmd_buf->offset);
return -EINVAL;
rc = -EINVAL;
goto end;
}
remain_len -= (size_t)cmd_buf->offset;
if (remain_len < (size_t)cmd_buf->length) {
CAM_ERR(CAM_UTIL, "Invalid length for cmd buf: %zu",
(size_t)cmd_buf->length);
return -EINVAL;
rc = -EINVAL;
goto end;
}
blob_ptr = (uint32_t *)(((uint8_t *)cpu_addr) +
@ -389,5 +399,6 @@ int cam_packet_util_process_generic_cmd_buffer(
}
end:
cam_mem_put_cpu_buf(cmd_buf->mem_handle);
return rc;
}
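The cam_packet_util.c hunks above convert early "return -EINVAL" exits into rc = -EINVAL plus a goto, so every path that reached the CPU mapping also drops it at one release label, and the bounds check runs against the mapped length before the buffer is used. A compact sketch of that single-exit shape; map_buf()/unmap_buf() and the helper below are illustrative stand-ins rather than the driver's API:

#include <stdint.h>
#include <stddef.h>

/* Stand-ins for the get/put pair used in the driver. */
static int map_buf(int hdl, uintptr_t *addr, size_t *len)
{
    static uint8_t backing[256];

    (void)hdl;
    *addr = (uintptr_t)backing;
    *len = sizeof(backing);
    return 0;
}
static void unmap_buf(int hdl) { (void)hdl; }

static int get_cmd_buffer(int hdl, size_t offset, size_t size, uint32_t **out)
{
    uintptr_t cpu_addr;
    size_t len;
    int rc = map_buf(hdl, &cpu_addr, &len);

    if (rc)
        return rc;                  /* nothing mapped yet */

    if (offset >= len || size > len - offset) {
        rc = -1;                    /* was an early return before this series */
        goto rel_buf;               /* now funnels through the release label */
    }
    *out = (uint32_t *)(cpu_addr + offset);

rel_buf:
    unmap_buf(hdl);                 /* success and failure both drop the reference */
    return rc;
}

int main(void)
{
    uint32_t *cmd = NULL;

    return get_cmd_buffer(0, 16, 32, &cmd);
}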

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CAM_PACKET_UTIL_H_

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of.h>
@ -2064,8 +2065,7 @@ static int cam_soc_util_dump_dmi_reg_range_user_buf(
CAM_ERR(CAM_UTIL,
"Invalid input args soc_info: %pK, dump_args: %pK",
soc_info, dump_args);
rc = -EINVAL;
goto end;
return -EINVAL;
}
if (dmi_read->num_pre_writes > CAM_REG_DUMP_DMI_CONFIG_MAX ||
@ -2073,15 +2073,14 @@ static int cam_soc_util_dump_dmi_reg_range_user_buf(
CAM_ERR(CAM_UTIL,
"Invalid number of requested writes, pre: %d post: %d",
dmi_read->num_pre_writes, dmi_read->num_post_writes);
rc = -EINVAL;
goto end;
return -EINVAL;
}
rc = cam_mem_get_cpu_buf(dump_args->buf_handle, &cpu_addr, &buf_len);
if (rc) {
CAM_ERR(CAM_UTIL, "Invalid handle %u rc %d",
dump_args->buf_handle, rc);
goto end;
return -EINVAL;
}
if (buf_len <= dump_args->offset) {
@ -2167,6 +2166,8 @@ static int cam_soc_util_dump_dmi_reg_range_user_buf(
sizeof(struct cam_hw_soc_dump_header);
end:
if (dump_args)
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}
@ -2191,13 +2192,13 @@ static int cam_soc_util_dump_cont_reg_range_user_buf(
"Invalid input args soc_info: %pK, dump_out_buffer: %pK reg_read: %pK",
soc_info, dump_args, reg_read);
rc = -EINVAL;
goto end;
return rc;
}
rc = cam_mem_get_cpu_buf(dump_args->buf_handle, &cpu_addr, &buf_len);
if (rc) {
CAM_ERR(CAM_UTIL, "Invalid handle %u rc %d",
dump_args->buf_handle, rc);
goto end;
return rc;
}
if (buf_len <= dump_args->offset) {
CAM_WARN(CAM_UTIL, "Dump offset overshoot %zu %zu",
@ -2247,6 +2248,8 @@ static int cam_soc_util_dump_cont_reg_range_user_buf(
dump_args->offset += hdr->size +
sizeof(struct cam_hw_soc_dump_header);
end:
if (dump_args)
cam_mem_put_cpu_buf(dump_args->buf_handle);
return rc;
}
@ -2338,6 +2341,8 @@ int cam_soc_util_reg_dump_to_cmd_buf(void *ctx,
if (rc || !cpu_addr || (buf_size == 0)) {
CAM_ERR(CAM_UTIL, "Failed in Get cpu addr, rc=%d, cpu_addr=%pK",
rc, (void *)cpu_addr);
if (rc)
return rc;
goto end;
}
@ -2539,5 +2544,6 @@ int cam_soc_util_reg_dump_to_cmd_buf(void *ctx,
}
end:
cam_mem_put_cpu_buf(cmd_desc->mem_handle);
return rc;
}
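The reg-dump changes above make one further distinction: when cam_mem_get_cpu_buf() itself fails there is no reference to drop and the function returns right away, whereas a get that succeeds but yields an unusable address or size jumps to the end label so the reference is still released. A hedged sketch of that distinction; get_buf()/put_buf() are stubs and the error code chosen on the goto path is illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Stubs standing in for cam_mem_get_cpu_buf()/cam_mem_put_cpu_buf(). */
static int get_buf(int hdl, uintptr_t *addr, size_t *size)
{
    (void)hdl;
    *addr = 0;      /* simulate a mapping that came back unusable */
    *size = 0;
    return 0;       /* the get itself succeeded, so a put is owed */
}
static void put_buf(int hdl) { printf("put %d\n", hdl); }

static int dump_to_cmd_buf(int hdl)
{
    uintptr_t cpu_addr;
    size_t buf_size;
    int rc = get_buf(hdl, &cpu_addr, &buf_size);

    if (rc || !cpu_addr || buf_size == 0) {
        if (rc)
            return rc;   /* get failed: no reference was taken */
        rc = -1;         /* illustrative error code */
        goto end;        /* get succeeded but buffer unusable: still release */
    }

    /* ... the real function writes the register dump into the buffer here ... */

end:
    put_buf(hdl);
    return rc;
}

int main(void)
{
    dump_to_cmd_buf(7);
    return 0;
}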