From: Ira Weiny <ira.weiny@intel.com>
The kernel pages used by shm_get_kernel_pages() are allocated using GFP_KERNEL through the following call stack:
trusted_instantiate()
	trusted_payload_alloc()  ->  GFP_KERNEL
	<trusted key op>
		tee_shm_register_kernel_buf()
			register_shm_helper()
				shm_get_kernel_pages()
Where <trusted key op> is one of:
	trusted_key_unseal()
	trusted_key_get_random()
	trusted_key_seal()
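For context, the payload buffer that eventually reaches shm_get_kernel_pages()
through this path comes straight from the slab allocator. A minimal sketch of
that allocation pattern (payload_buf_alloc() is a hypothetical helper, not the
actual trusted-keys source):

	/* Sketch only: the buffer is linear-mapped slab memory, never vmalloc. */
	static void *payload_buf_alloc(size_t len)
	{
		return kzalloc(len, GFP_KERNEL);
	}

So is_vmalloc_addr() can never be true for addresses arriving via this call
stack.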
Remove the vmalloc page support from shm_get_kernel_pages() and replace it
with a WARN_ON_ONCE() that rejects vmalloc addresses with -EINVAL.
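After this change the contract for callers is to pass only linear-mapped
kernel memory; a vmalloc address now trips the warning and the registration
fails. A hedged usage sketch (tee_shm_register_kernel_buf() is the existing
entry point; ctx, buf and len are placeholders):

	void *buf = kmalloc(len, GFP_KERNEL);	/* supported: slab memory */
	struct tee_shm *shm = tee_shm_register_kernel_buf(ctx, buf, len);

	void *vbuf = vmalloc(len);		/* rejected after this patch */
	/* tee_shm_register_kernel_buf(ctx, vbuf, len) now fails with an
	 * ERR_PTR, the -EINVAL coming from shm_get_kernel_pages(). */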
Cc: Jens Wiklander <jens.wiklander@linaro.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: "Fabio M. De Francesco" <fmdefrancesco@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
Jens,

I went with the suggestion from Linus and Christoph and rejected vmalloc
addresses. I did not hear back from you on Linus' question of whether the
vmalloc page support was required by an upcoming patch set, so I assumed it
was something out of tree.
---
 drivers/tee/tee_shm.c | 36 ++++++++++++------------------------
 1 file changed, 12 insertions(+), 24 deletions(-)
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 27295bda3e0b..527a6eabc03e 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -24,37 +24,25 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
 static int shm_get_kernel_pages(unsigned long start, size_t page_count,
 				struct page **pages)
 {
+	struct kvec *kiov;
 	size_t n;
 	int rc;
 
-	if (is_vmalloc_addr((void *)start)) {
-		struct page *page;
-
-		for (n = 0; n < page_count; n++) {
-			page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
-			if (!page)
-				return -ENOMEM;
-
-			get_page(page);
-			pages[n] = page;
-		}
-		rc = page_count;
-	} else {
-		struct kvec *kiov;
-
-		kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
-		if (!kiov)
-			return -ENOMEM;
+	if (WARN_ON_ONCE(is_vmalloc_addr((void *)start)))
+		return -EINVAL;
 
-		for (n = 0; n < page_count; n++) {
-			kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
-			kiov[n].iov_len = PAGE_SIZE;
-		}
+	kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+	if (!kiov)
+		return -ENOMEM;
 
-		rc = get_kernel_pages(kiov, page_count, 0, pages);
-		kfree(kiov);
+	for (n = 0; n < page_count; n++) {
+		kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+		kiov[n].iov_len = PAGE_SIZE;
 	}
 
+	rc = get_kernel_pages(kiov, page_count, 0, pages);
+	kfree(kiov);
+
 	return rc;
 }
 