-	 * we'll need two additional MRs for the registrations and the
-	 * invalidation.
+	 * If the device uses MRs to perform RDMA READ or WRITE operations,
+	 * or if data integrity is enabled, account for registration and
+	 * invalidation work requests.
 	 */
 	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
 	    rdma_rw_can_use_mr(dev, attr->port_num))
-		factor += 2;	/* inv + reg */
+		factor += 2;	/* reg + inv */
 
 	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
 
 	/*
-	 * But maybe we were just too high in the sky and the device doesn't
-	 * even support all we need, and we'll have to live with what we get..
+	 * The device might not support all we need, and we'll have to
+	 * live with what we get.
 	 */
 	attr->cap.max_send_wr =
 		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index d606cac482338..9a8f4b76ce588 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -66,6 +66,8 @@ int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
 
 unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
 		unsigned int maxpages);
+unsigned int rdma_rw_max_send_wr(struct ib_device *dev, u32 port_num,
+		unsigned int max_rdma_ctxs, u32 create_flags);
 void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
 int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
 void rdma_rw_cleanup_mrs(struct ib_qp *qp);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index b7b318ad25c42..9b623849723ed 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -462,7 +462,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		newxprt->sc_max_bc_requests = 2;
 	}
 
-	/* Arbitrary estimate of the needed number of rdma_rw contexts.
+	/* Estimate the needed number of rdma_rw contexts. The maximum
+	 * Read and Write chunks have one segment each. Each request
+	 * can involve one Read chunk and either a Write chunk or Reply
+	 * chunk; thus a factor of three.
 	 */
 	maxpayload = min(xprt->xpt_server->sv_max_payload,
 			 RPCSVC_MAXPAYLOAD_RDMA);
@@ -470,7 +473,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		rdma_rw_mr_factor(dev, newxprt->sc_port_num,
 				  maxpayload >> PAGE_SHIFT);
 
-	newxprt->sc_sq_depth = rq_depth + ctxts;
+	newxprt->sc_sq_depth = rq_depth +
+		rdma_rw_max_send_wr(dev, newxprt->sc_port_num, ctxts, 0);
 	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr)
 		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
 	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
-- 
2.51.0
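
The body of rdma_rw_max_send_wr() is not included in this extract. As a
rough illustration only, here is a minimal sketch of what the helper
presumably computes, assuming it mirrors the per-context factor logic
from rdma_rw_init_qp() in the first hunk above; rdma_rw_can_use_mr() is
assumed to be reachable as it is in drivers/infiniband/core/rw.c:

/*
 * Hypothetical sketch, not the implementation from this patch: one
 * RDMA READ or WRITE WR per rdma_rw context, plus a registration and
 * an invalidation WR when the device needs MRs for READ/WRITE or
 * data integrity is enabled.
 */
unsigned int rdma_rw_max_send_wr(struct ib_device *dev, u32 port_num,
		unsigned int max_rdma_ctxs, u32 create_flags)
{
	unsigned int factor = 1;	/* READ or WRITE WR per context */

	if (create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, port_num))
		factor += 2;		/* reg + inv */

	return factor * max_rdma_ctxs;
}

Under that assumption, a ULP such as svcrdma (which passes create_flags
of 0 above) sizes its SQ with the same per-context accounting that
rdma_rw_init_qp() applies when the QP is created, instead of guessing.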