Commit 70b0d6b0a199 ("tee: optee: Fix supplicant wait loop") made the client's wait killable so it can be interrupted during shutdown or after a supplicant crash. This changes the original lifetime expectations: the client task can now terminate while the supplicant is still processing its request.
If the client exits first, it unconditionally kfree()s the request, even when the supplicant has already popped it off the queue and its ID is still registered in supp->idr. A subsequent lookup on the supplicant path then dereferences freed memory, leading to a use-after-free.
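One possible interleaving, sketched here only for illustration (function names
as in drivers/tee/optee/supp.c):

  client (optee_supp_thrd_req)          supplicant (tee-supplicant)
  ----------------------------          ---------------------------
  queue req, wait killable
                                        optee_supp_recv()
                                          supp_pop_entry(): req leaves the
                                          queue, its ID is put in supp->idr
  <client killed>
  wait returns -ERESTARTSYS
  req->in_queue is false, so only
  kfree(req); the ID stays in supp->idr
                                        optee_supp_send()
                                          supp_pop_req()
                                            idr_find(id) returns the freed
                                            req -> use-after-free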
Serialise access to the request with supp->mutex:
* Hold supp->mutex in optee_supp_recv() and optee_supp_send() while looking
  up and touching the request.

* Let optee_supp_thrd_req() notice that the client is being killed and signal
  optee_supp_send() accordingly.
With these changes the request cannot be freed while the supplicant still has a reference, eliminating the race.
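In condensed form (the complete change is in the hunk below), the kill path in
optee_supp_thrd_req() then reads:

  if (wait_for_completion_killable(&req->c)) {
          mutex_lock(&supp->mutex);
          if (req->in_queue) {
                  /* Supplicant never saw the request: just dequeue it. */
                  list_del(&req->link);
                  req->in_queue = false;
                  ret = TEEC_ERROR_COMMUNICATION;
          } else if (req->id == -1) {
                  /* Supplicant already answered: keep its result. */
                  ret = req->ret;
          } else {
                  /*
                   * In flight: park invalid_req under the ID so that
                   * supp_pop_req() fails instead of touching freed memory.
                   */
                  idr_replace(&supp->idr, &invalid_req, req->id);
                  ret = TEEC_ERROR_COMMUNICATION;
          }
          kfree(req);
          mutex_unlock(&supp->mutex);
  }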
Fixes: 70b0d6b0a199 ("tee: optee: Fix supplicant wait loop")
Signed-off-by: Amirreza Zarrabi <amirreza.zarrabi@oss.qualcomm.com>
---
 drivers/tee/optee/supp.c | 114 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 77 insertions(+), 37 deletions(-)
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index d0f397c90242..62c9c75f48a6 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -9,6 +9,7 @@
 
 struct optee_supp_req {
 	struct list_head link;
+	int id;
 
 	bool in_queue;
 	u32 func;
@@ -19,6 +20,9 @@ struct optee_supp_req {
 	struct completion c;
 };
 
+/* Placeholder for an invalidated pending request in supp->idr. */
+static struct optee_supp_req invalid_req;
+
 void optee_supp_init(struct optee_supp *supp)
 {
 	memset(supp, 0, sizeof(*supp));
@@ -102,6 +106,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
 	mutex_lock(&supp->mutex);
 	list_add_tail(&req->link, &supp->reqs);
 	req->in_queue = true;
+	req->id = -1;
 	mutex_unlock(&supp->mutex);
 
 	/* Tell an eventual waiter there's a new request */
@@ -117,21 +122,40 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
 	if (wait_for_completion_killable(&req->c)) {
 		mutex_lock(&supp->mutex);
 		if (req->in_queue) {
+			/* Supplicant has not seen this request yet. */
 			list_del(&req->link);
 			req->in_queue = false;
+
+			ret = TEEC_ERROR_COMMUNICATION;
+		} else if (req->id == -1) {
+			/*
+			 * Supplicant has processed this request. Ignore the
+			 * kill signal for now and submit the result.
+			 */
+			ret = req->ret;
+		} else {
+			/*
+			 * Supplicant is in the middle of processing this
+			 * request. Replace req with invalid_req so that the ID
+			 * remains busy, causing optee_supp_send() to fail on
+			 * the next call to supp_pop_req() with this ID.
+			 */
+			idr_replace(&supp->idr, &invalid_req, req->id);
+			ret = TEEC_ERROR_COMMUNICATION;
 		}
+
+		kfree(req);
 		mutex_unlock(&supp->mutex);
-		req->ret = TEEC_ERROR_COMMUNICATION;
+	} else {
+		ret = req->ret;
+		kfree(req);
 	}
 
-	ret = req->ret;
-	kfree(req);
-
 	return ret;
 }
 
 static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
-					     int num_params, int *id)
+					     int num_params)
 {
 	struct optee_supp_req *req;
 
@@ -153,8 +177,8 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
 		return ERR_PTR(-EINVAL);
 	}
 
-	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
-	if (*id < 0)
+	req->id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+	if (req->id < 0)
 		return ERR_PTR(-ENOMEM);
 
 	list_del(&req->link);
@@ -214,7 +238,6 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
 	struct optee *optee = tee_get_drvdata(teedev);
 	struct optee_supp *supp = &optee->supp;
 	struct optee_supp_req *req = NULL;
-	int id;
 	size_t num_meta;
 	int rc;
 
@@ -223,16 +246,45 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
 		return rc;
 
 	while (true) {
-		mutex_lock(&supp->mutex);
-		req = supp_pop_entry(supp, *num_params - num_meta, &id);
-		mutex_unlock(&supp->mutex);
+		scoped_guard(mutex, &supp->mutex) {
+			req = supp_pop_entry(supp, *num_params - num_meta);
+			if (!req)
+				goto wait_for_request;
 
-		if (req) {
 			if (IS_ERR(req))
 				return PTR_ERR(req);
-			break;
+
+			/*
+			 * Popped a request: process it while holding the lock,
+			 * so that optee_supp_thrd_req() doesn't pull the
+			 * request out from under us.
+			 */
+
+			if (num_meta) {
+				/*
+				 * tee-supplicant support meta parameters ->
+				 * requests can be processed asynchronously.
+				 */
+				param->attr =
+					TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+					TEE_IOCTL_PARAM_ATTR_META;
+				param->u.value.a = req->id;
+				param->u.value.b = 0;
+				param->u.value.c = 0;
+			} else {
+				supp->req_id = req->id;
+			}
+
+			*func = req->func;
+			*num_params = req->num_params + num_meta;
+			memcpy(param + num_meta, req->param,
+			       sizeof(struct tee_param) * req->num_params);
 		}
 
+		/* Check for the next request in the queue. */
+		continue;
+
+wait_for_request:
 		/*
 		 * If we didn't get a request we'll block in
 		 * wait_for_completion() to avoid needless spinning.
@@ -245,27 +297,6 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
 			return -ERESTARTSYS;
 	}
 
-	if (num_meta) {
-		/*
-		 * tee-supplicant support meta parameters -> requsts can be
-		 * processed asynchronously.
-		 */
-		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
-			      TEE_IOCTL_PARAM_ATTR_META;
-		param->u.value.a = id;
-		param->u.value.b = 0;
-		param->u.value.c = 0;
-	} else {
-		mutex_lock(&supp->mutex);
-		supp->req_id = id;
-		mutex_unlock(&supp->mutex);
-	}
-
-	*func = req->func;
-	*num_params = req->num_params + num_meta;
-	memcpy(param + num_meta, req->param,
-	       sizeof(struct tee_param) * req->num_params);
-
 	return 0;
 }
 
@@ -297,12 +328,21 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
 	if (!req)
 		return ERR_PTR(-ENOENT);
 
+	/* optee_supp_thrd_req() already returned to optee. */
+	if (req == &invalid_req) {
+		req = ERR_PTR(-ENOENT);
+		goto failed_req;
+	}
+
 	if ((num_params - nm) != req->num_params)
 		return ERR_PTR(-EINVAL);
 
+	req->id = -1;
+	*num_meta = nm;
+failed_req:
 	idr_remove(&supp->idr, id);
 	supp->req_id = -1;
-	*num_meta = nm;
+
 
 	return req;
 }
 
@@ -328,9 +368,8 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
 
 	mutex_lock(&supp->mutex);
 	req = supp_pop_req(supp, num_params, param, &num_meta);
-	mutex_unlock(&supp->mutex);
-
 	if (IS_ERR(req)) {
+		mutex_unlock(&supp->mutex);
 		/* Something is wrong, let supplicant restart. */
 		return PTR_ERR(req);
 	}
@@ -358,6 +397,7 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
 
 	/* Let the requesting thread continue */
 	complete(&req->c);
+	mutex_unlock(&supp->mutex);
 
 	return 0;
 }
---
base-commit: 3be1a7a31fbda82f3604b6c31e4f390110de1b46
change-id: 20250604-fix-use-after-free-8ff1b5d5d774
Best regards,