tx->iopoll_list) || io_task_work_pending(ctx)) {
-			u32 tail = ctx->cached_cq_tail;
-
			(void) io_run_local_work_locked(ctx, min_events);
			if (task_work_pending(current) ||
			    list_empty(&ctx->iopoll_list)) {
				mutex_unlock(&ctx->uring_lock);
				io_run_task_work();
				mutex_lock(&ctx->uring_lock);
			}
			/* some requests don't go through iopoll_list */
-			if (tail != ctx->cached_cq_tail ||
-			    list_empty(&ctx->iopoll_list))
+			if (list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, !min_events);
		if (unlikely(ret < 0))
			return ret;

		if (task_sigpending(current))
			return -EINTR;
		if (need_resched())
			break;
-
-		nr_events += ret;
-	} while (nr_events < min_events);
+	}
	return 0;
}

void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw)
--
2.45.2

Subject: [PATCH v4 3/5] io_uring: count CQEs in io_iopoll_check()
From: Caleb Sander Mateos
Cc: Jens Axboe, Christoph Hellwig, Keith Busch, Sagi Grimberg