@@ -3,16 +3,14 @@ Subject: [PATCH] BACKPORT: NFSRDMA
Signed-off-by: Vladimir Sokolovsky <[email protected]>
---
- net/sunrpc/xprtrdma/rpc_rdma.c | 20 ++++++++-
- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 70 +++++++++++++++++++++++++++++-
- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 8 +++
- net/sunrpc/xprtrdma/svc_rdma_transport.c | 18 ++++++++
- net/sunrpc/xprtrdma/transport.c | 36 ++++++++++++++--
- net/sunrpc/xprtrdma/verbs.c | 8 ++--
- 6 files changed, 148 insertions(+), 12 deletions(-)
+ net/sunrpc/xprtrdma/rpc_rdma.c | 20 ++++++++-
+ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 70 +++++++++++++++++++++++++++++++-
+ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 12 ++++++
+ net/sunrpc/xprtrdma/svc_rdma_transport.c | 18 ++++++++
+ net/sunrpc/xprtrdma/transport.c | 36 ++++++++++++++--
+ net/sunrpc/xprtrdma/verbs.c | 8 ++--
+ 6 files changed, 152 insertions(+), 12 deletions(-)
- diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
- index xxxxxxx..xxxxxxx xxxxxx
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -49,11 +49,11 @@
@@ -29,7 +27,7 @@ index xxxxxxx..xxxxxxx xxxxxx
static const char transfertypes[][12] = {
"pure inline", /* no chunks */
" read chunk", /* some argument via rdma read */
- @@ -361,9 +361,17 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+ @@ -361,9 +361,17 @@ rpcrdma_inline_pullup(struct rpc_rqst *r
curlen = copy_len;
dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
__func__, i, destp, copy_len, curlen);
@@ -47,7 +45,7 @@ index xxxxxxx..xxxxxxx xxxxxx
rqst->rq_svec[0].iov_len += curlen;
destp += curlen;
copy_len -= curlen;
- @@ -649,10 +657,18 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
+ @@ -649,10 +657,18 @@ rpcrdma_inline_fixup(struct rpc_rqst *rq
dprintk("RPC: %s: page %d"
" srcp 0x%p len %d curlen %d\n",
__func__, i, srcp, copy_len, curlen);
@@ -66,11 +64,9 @@ index xxxxxxx..xxxxxxx xxxxxx
srcp += curlen;
copy_len -= curlen;
if (copy_len == 0)
- diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
- index xxxxxxx..xxxxxxx xxxxxx
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
- @@ -93,7 +93,11 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
+ @@ -93,7 +93,11 @@ static void rdma_build_arg_xdr(struct sv
sge_no++;
}
rqstp->rq_respages = &rqstp->rq_pages[sge_no];
@@ -82,7 +78,7 @@ index xxxxxxx..xxxxxxx xxxxxx
/* We should never run out of SGE because the limit is defined to
* support the max allowed RPC data length
- @@ -136,7 +140,10 @@ typedef int (*rdma_reader_fn)(struct svcxprt_rdma *xprt,
+ @@ -136,7 +140,10 @@ typedef int (*rdma_reader_fn)(struct svc
int last);
/* Issue an RDMA_READ using the local lkey to map the data sink */
@@ -94,7 +90,7 @@ index xxxxxxx..xxxxxxx xxxxxx
struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *head,
int *page_no,
- @@ -144,7 +151,11 @@ static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ @@ -144,7 +151,11 @@ static int rdma_read_chunk_lcl(struct sv
u32 rs_handle,
u32 rs_length,
u64 rs_offset,
@@ -106,7 +102,7 @@ index xxxxxxx..xxxxxxx xxxxxx
{
struct ib_send_wr read_wr;
int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
- @@ -168,7 +179,9 @@ static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ @@ -168,7 +179,9 @@ static int rdma_read_chunk_lcl(struct sv
if (!pg_off)
head->count++;
rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
@@ -116,7 +112,7 @@ index xxxxxxx..xxxxxxx xxxxxx
ctxt->sge[pno].addr =
ib_dma_map_page(xprt->sc_cm_id->device,
head->arg.pages[pg_no], pg_off,
- @@ -229,7 +242,10 @@ static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ @@ -229,7 +242,10 @@ static int rdma_read_chunk_lcl(struct sv
}
/* Issue an RDMA_READ using an FRMR to map the data sink */
@@ -128,7 +124,7 @@ index xxxxxxx..xxxxxxx xxxxxx
struct svc_rqst *rqstp,
struct svc_rdma_op_ctxt *head,
int *page_no,
- @@ -237,7 +253,11 @@ static int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ @@ -237,7 +253,11 @@ static int rdma_read_chunk_frmr(struct s
u32 rs_handle,
u32 rs_length,
u64 rs_offset,
@@ -140,7 +136,7 @@ index xxxxxxx..xxxxxxx xxxxxx
{
struct ib_send_wr read_wr;
struct ib_send_wr inv_wr;
- @@ -273,7 +293,9 @@ static int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ @@ -273,7 +293,9 @@ static int rdma_read_chunk_frmr(struct s
if (!pg_off)
head->count++;
rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
@@ -150,7 +146,7 @@ index xxxxxxx..xxxxxxx xxxxxx
frmr->page_list->page_list[pno] =
ib_dma_map_page(xprt->sc_cm_id->device,
head->arg.pages[pg_no], 0,
- @@ -365,24 +387,50 @@ static int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ @@ -365,24 +387,50 @@ static int rdma_read_chunk_frmr(struct s
return ret;
}
@@ -201,7 +197,7 @@ index xxxxxxx..xxxxxxx xxxxxx
return -EINVAL;
/* The request is completed when the RDMA_READs complete. The
- @@ -398,27 +446,43 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
+ @@ -398,27 +446,43 @@ static int rdma_read_chunks(struct svcxp
head->arg.len = rqstp->rq_arg.len;
head->arg.buflen = rqstp->rq_arg.buflen;
@@ -245,7 +241,7 @@ index xxxxxxx..xxxxxxx xxxxxx
if (ret < 0)
goto err;
byte_count -= ret;
- @@ -483,7 +547,9 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
+ @@ -483,7 +547,9 @@ static int rdma_read_complete(struct svc
/* rq_respages starts after the last arg page */
rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
@@ -255,11 +251,9 @@ index xxxxxxx..xxxxxxx xxxxxx
/* Rebuild rq_arg head and tail. */
rqstp->rq_arg.head[0] = head->arg.head[0];
- diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
- index xxxxxxx..xxxxxxx xxxxxx
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
- @@ -375,7 +375,9 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ @@ -375,7 +375,9 @@ static int send_reply(struct svcxprt_rdm
int sge_no;
int sge_bytes;
int page_no;
@@ -269,7 +263,7 @@ index xxxxxxx..xxxxxxx xxxxxx
int ret;
/* Post a recv buffer to handle another request. */
- @@ -427,8 +429,12 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ @@ -427,8 +429,12 @@ static int send_reply(struct svcxprt_rdm
* respages array. They are our pages until the I/O
* completes.
*/
@@ -282,7 +276,7 @@ index xxxxxxx..xxxxxxx xxxxxx
ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
ctxt->count++;
rqstp->rq_respages[page_no] = NULL;
- @@ -440,7 +446,9 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ @@ -440,7 +446,9 @@ static int send_reply(struct svcxprt_rdm
if (page_no+1 >= sge_no)
ctxt->sge[page_no+1].length = 0;
}
@@ -292,11 +286,21 @@ index xxxxxxx..xxxxxxx xxxxxx
BUG_ON(sge_no > rdma->sc_max_sge);
memset(&send_wr, 0, sizeof send_wr);
- diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
- index xxxxxxx..xxxxxxx xxxxxx
+ @@ -509,7 +517,11 @@ int svc_rdma_sendto(struct svc_rqst *rqs
+ inline_bytes = rqstp->rq_res.len;
+
+ /* Create the RDMA response header */
+ + #ifdef HAVE_SVC_RDMA_GET_PAGE
+ res_page = svc_rdma_get_page();
+ + #else
+ + res_page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+ + #endif
+ rdma_resp = page_address(res_page);
+ reply_ary = svc_rdma_get_reply_array(rdma_argp);
+ if (reply_ary)
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
- @@ -66,7 +66,9 @@ static void dto_tasklet_func(unsigned long data);
+ @@ -66,7 +66,9 @@ static void dto_tasklet_func(unsigned lo
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
@@ -306,7 +310,7 @@ index xxxxxxx..xxxxxxx xxxxxx
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);
- @@ -84,7 +86,9 @@ static struct svc_xprt_ops svc_rdma_ops = {
+ @@ -84,7 +86,9 @@ static struct svc_xprt_ops svc_rdma_ops
.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
.xpo_has_wspace = svc_rdma_has_wspace,
.xpo_accept = svc_rdma_accept,
@@ -326,7 +330,7 @@ index xxxxxxx..xxxxxxx xxxxxx
};
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
- @@ -461,7 +467,11 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
+ @@ -461,7 +467,11 @@ static struct svcxprt_rdma *rdma_create_
if (!cma_xprt)
return NULL;
@@ -338,7 +342,7 @@ index xxxxxxx..xxxxxxx xxxxxx
INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
- @@ -970,10 +980,16 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ @@ -970,10 +980,16 @@ static struct svc_xprt *svc_rdma_accept(
* NB: iWARP requires remote write access for the data sink
* of an RDMA_READ. IB does not.
*/
@@ -355,7 +359,7 @@ index xxxxxxx..xxxxxxx xxxxxx
}
/*
- @@ -1205,10 +1221,12 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
+ @@ -1205,10 +1221,12 @@ static int svc_rdma_has_wspace(struct sv
return 1;
}
@@ -368,8 +372,6 @@ index xxxxxxx..xxxxxxx xxxxxx
/*
* Attempt to register the kvec representing the RPC memory with the
- diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
- index xxxxxxx..xxxxxxx xxxxxx
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -51,11 +51,13 @@
@@ -387,7 +389,7 @@ index xxxxxxx..xxxxxxx xxxxxx
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
- @@ -75,7 +77,7 @@ static unsigned int xprt_rdma_inline_write_padding;
+ @@ -75,7 +77,7 @@ static unsigned int xprt_rdma_inline_wri
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 1;
@@ -396,7 +398,7 @@ index xxxxxxx..xxxxxxx xxxxxx
static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
- @@ -277,8 +279,14 @@ xprt_setup_rdma(struct xprt_create *args)
+ @@ -277,8 +279,14 @@ xprt_setup_rdma(struct xprt_create *args
return ERR_PTR(-EBADF);
}
@@ -411,7 +413,7 @@ index xxxxxxx..xxxxxxx xxxxxx
xprt_rdma_slot_table_entries);
if (xprt == NULL) {
dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
- @@ -426,8 +434,15 @@ xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
+ @@ -426,8 +434,15 @@ xprt_rdma_set_port(struct rpc_xprt *xprt
}
static void
@@ -427,7 +429,7 @@ index xxxxxxx..xxxxxxx xxxxxx
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
if (r_xprt->rx_ep.rep_connected != 0) {
- @@ -463,15 +478,22 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size)
+ @@ -463,15 +478,22 @@ xprt_rdma_allocate(struct rpc_task *task
struct rpcrdma_regbuf *rb;
struct rpcrdma_req *req;
size_t min_size;
@@ -450,7 +452,7 @@ index xxxxxxx..xxxxxxx xxxxxx
if (req->rl_rdmabuf == NULL)
goto out_rdmabuf;
- @@ -603,7 +625,11 @@ xprt_rdma_send_request(struct rpc_task *task)
+ @@ -603,7 +625,11 @@ xprt_rdma_send_request(struct rpc_task *
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection;
@@ -462,7 +464,7 @@ index xxxxxxx..xxxxxxx xxxxxx
rqst->rq_bytes_sent = 0;
return 0;
- @@ -660,7 +686,9 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
+ @@ -660,7 +686,9 @@ static void xprt_rdma_print_stats(struct
static struct rpc_xprt_ops xprt_rdma_procs = {
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
@@ -472,7 +474,7 @@ index xxxxxxx..xxxxxxx xxxxxx
.release_request = xprt_release_rqst_cong, /* ditto */
.set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
.rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
- @@ -687,7 +715,7 @@ static void __exit xprt_rdma_cleanup(void)
+ @@ -687,7 +715,7 @@ static void __exit xprt_rdma_cleanup(voi
int rc;
dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
@@ -490,8 +492,6 @@ index xxxxxxx..xxxxxxx xxxxxx
if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
- diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
- index xxxxxxx..xxxxxxx xxxxxx
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -58,7 +58,7 @@
@@ -512,7 +512,7 @@ index xxxxxxx..xxxxxxx xxxxxx
static const char * const conn[] = {
"address resolved",
"address error",
- @@ -423,7 +423,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ @@ -423,7 +423,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *i
struct rpcrdma_xprt *xprt = id->context;
struct rpcrdma_ia *ia = &xprt->rx_ia;
struct rpcrdma_ep *ep = &xprt->rx_ep;