diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 5d35f4be132e..0c5dfee9b71a 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -3153,6 +3153,30 @@ static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data) pr_debug("prepare to unload app(%d)(%s), pending %d\n", data->client.app_id, data->client.app_name, data->client.unload_pending); + + /* For keymaster we are not going to unload, so no need to add it to + * the unload app pending list; as soon as we identify it, release the + * ion buffer and return. + */ + if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) { + if (data->client.dmabuf) { + /* Each client will get same KM TA loaded handle but + * will allocate separate shared buffer during + * loading of TA, as client can't unload KM TA so we + * will only free the shared buffer and return early + * to avoid any ion buffer leak. + */ + qseecom_vaddr_unmap(data->client.sb_virt, + data->client.sgt, data->client.attach, + data->client.dmabuf); + MAKE_NULL(data->client.sgt, + data->client.attach, data->client.dmabuf); + } + __qseecom_free_tzbuf(&data->sglistinfo_shm); + data->released = true; + return 0; + } + if (data->client.unload_pending) return 0; entry = kzalloc(sizeof(*entry), GFP_KERNEL); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h index 283786236a07..0200541c2764 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h @@ -44,7 +44,7 @@ enum ipa_fltrt_equations { #define IPA3_0_HW_TBL_ADDR_MASK (127) #define IPA3_0_HW_RULE_BUF_SIZE (256) #define IPA3_0_HW_RULE_START_ALIGNMENT (7) -#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (128) +#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (256) /* diff --git a/mm/mmap.c b/mm/mmap.c index 2088dc2c7edc..ea0e8b3b75b5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3198,10 +3198,9 @@ void exit_mmap(struct mm_struct *mm) 
(void)__oom_reap_task_mm(mm); set_bit(MMF_OOM_SKIP, &mm->flags); - down_write(&mm->mmap_sem); - up_write(&mm->mmap_sem); } + down_write(&mm->mmap_sem); if (mm->locked_vm) { vma = mm->mmap; while (vma) { @@ -3214,8 +3213,11 @@ void exit_mmap(struct mm_struct *mm) arch_exit_mmap(mm); vma = mm->mmap; - if (!vma) /* Can happen if dup_mmap() received an OOM */ + if (!vma) { + /* Can happen if dup_mmap() received an OOM */ + up_write(&mm->mmap_sem); return; + } lru_add_drain(); flush_cache_mm(mm); @@ -3226,16 +3228,14 @@ void exit_mmap(struct mm_struct *mm) free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, 0, -1); - /* - * Walk the list again, actually closing and freeing it, - * with preemption enabled, without holding any MM locks. - */ + /* Walk the list again, actually closing and freeing it. */ while (vma) { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); cond_resched(); } + up_write(&mm->mmap_sem); vm_unacct_memory(nr_accounted); }