ceph: negotiate authentication protocol; implement AUTH_NONE protocol
fs/ceph/osd_client.c [linux-3.10.git]
#include "ceph_debug.h"

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "super.h"
#include "osd_client.h"
#include "messenger.h"
#include "decode.h"
#include "auth.h"

static const struct ceph_connection_operations osd_con_ops;

static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten the extent as necessary if it crosses an
 * object boundary.
 *
 * fill in the osd op in the request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        struct ceph_osd_op *op = (void *)(reqhead + 1);
        u64 orig_len = *plen;
        u64 objoff, objlen;    /* extent in object */
        u64 bno;

        reqhead->snapid = cpu_to_le64(vino.snap);

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, plen, &bno,
                                      &objoff, &objlen);
        if (*plen < orig_len)
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);

        sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
        req->r_oid_len = strlen(req->r_oid);

        op->extent.offset = cpu_to_le64(objoff);
        op->extent.length = cpu_to_le64(objlen);
        req->r_num_pages = calc_pages_for(off, *plen);

        dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
             req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
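
/*
 * Worked example (illustrative, not from the original file): assume a
 * simple layout with stripe count 1 and 4 MB objects.  A read of 2 MB
 * at file offset 7 MB lands in object bno=1, which covers bytes
 * [4 MB, 8 MB) of the file: objoff = 3 MB, and because the extent
 * would cross the object boundary, objlen and *plen are shortened to
 * 1 MB.  The object is named "<ino>.00000001" and the caller issues a
 * follow-up request for the remainder.
 */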


/*
 * requests
 */
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        dout("osdc put_request %p %d -> %d\n", req, atomic_read(&req->r_ref),
             atomic_read(&req->r_ref)-1);
        BUG_ON(atomic_read(&req->r_ref) <= 0);
        if (atomic_dec_and_test(&req->r_ref)) {
                if (req->r_request)
                        ceph_msg_put(req->r_request);
                if (req->r_reply)
                        ceph_msg_put(req->r_reply);
                if (req->r_own_pages)
                        ceph_release_page_vector(req->r_pages,
                                                 req->r_num_pages);
                ceph_put_snap_context(req->r_snapc);
                if (req->r_mempool)
                        mempool_free(req, req->r_osdc->req_mempool);
                else
                        kfree(req);
        }
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               int do_sync,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               struct timespec *mtime,
                                               bool use_mempool, int num_reply)
{
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
        struct ceph_osd_request_head *head;
        struct ceph_osd_op *op;
        void *p;
        int do_trunc = truncate_seq && (off + *plen > truncate_size);
        int num_op = 1 + do_sync + do_trunc;
        size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
        int err, i;
        u64 prevofs;

        if (use_mempool) {
                req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
                memset(req, 0, sizeof(*req));
        } else {
                req = kzalloc(sizeof(*req), GFP_NOFS);
        }
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
        if (err) {
                ceph_osdc_put_request(req);
                return ERR_PTR(err);
        }

        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        atomic_set(&req->r_ref, 1);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        INIT_LIST_HEAD(&req->r_unsafe_item);
        req->r_flags = flags;

        WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

        /* create message; allow space for oid */
        msg_size += 40;
        if (snapc)
                msg_size += sizeof(u64) * snapc->num_snaps;
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
        if (IS_ERR(msg)) {
                /* release the reply reservation taken above */
                ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply);
                ceph_osdc_put_request(req);
                return ERR_CAST(msg);
        }
        msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
        memset(msg->front.iov_base, 0, msg->front.iov_len);
        head = msg->front.iov_base;
        op = (void *)(head + 1);
        p = (void *)(op + num_op);

        req->r_request = msg;
        req->r_snapc = ceph_get_snap_context(snapc);

        head->client_inc = cpu_to_le32(1); /* always, for now. */
        head->flags = cpu_to_le32(flags);
        if (flags & CEPH_OSD_FLAG_WRITE)
                ceph_encode_timespec(&head->mtime, mtime);
        head->num_ops = cpu_to_le16(num_op);
        op->op = cpu_to_le16(opcode);

        /* calculate max write size */
        calc_layout(osdc, vino, layout, off, plen, req);
        req->r_file_layout = *layout;  /* keep a copy */

        if (flags & CEPH_OSD_FLAG_WRITE) {
                req->r_request->hdr.data_off = cpu_to_le16(off);
                req->r_request->hdr.data_len = cpu_to_le32(*plen);
                op->payload_len = cpu_to_le32(*plen);
        }

        /* fill in oid */
        head->object_len = cpu_to_le32(req->r_oid_len);
        memcpy(p, req->r_oid, req->r_oid_len);
        p += req->r_oid_len;

        /* additional ops */
        if (do_trunc) {
                op++;
                op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ?
                             CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC);
                op->trunc.truncate_seq = cpu_to_le32(truncate_seq);
                prevofs = le64_to_cpu((op-1)->extent.offset);
                op->trunc.truncate_size = cpu_to_le64(truncate_size -
                                                      (off-prevofs));
        }
        if (do_sync) {
                op++;
                op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
        }
        if (snapc) {
                head->snap_seq = cpu_to_le64(snapc->seq);
                head->num_snaps = cpu_to_le32(snapc->num_snaps);
                for (i = 0; i < snapc->num_snaps; i++) {
                        put_unaligned_le64(snapc->snaps[i], p);
                        p += sizeof(u64);
                }
        }

        BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
        return req;
}
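
/*
 * Illustrative usage sketch (not part of the original file; compiled
 * out): how a caller might build, submit, and wait on a simple
 * synchronous read using the functions above.  The helper name
 * example_sync_read is hypothetical; it mirrors ceph_osdc_readpages()
 * further down in this file.
 */
#if 0
static int example_sync_read(struct ceph_osd_client *osdc,
                             struct ceph_vino vino,
                             struct ceph_file_layout *layout,
                             u64 off, u64 *plen, struct page **pages)
{
        struct ceph_osd_request *req;
        int rc;

        req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, 0, 0, NULL, false, 1);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* hand the request our page vector; the read may have been
         * shortened at an object boundary, so *plen was updated above */
        req->r_pages = pages;
        req->r_num_pages = calc_pages_for(off, *plen);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);
        ceph_osdc_put_request(req);
        return rc;
}
#endif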

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *new)
{
        struct rb_node **p = &osdc->requests.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd_request *req = NULL;

        while (*p) {
                parent = *p;
                req = rb_entry(parent, struct ceph_osd_request, r_node);
                if (new->r_tid < req->r_tid)
                        p = &(*p)->rb_left;
                else if (new->r_tid > req->r_tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->r_node, parent, p);
        rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
                                                 u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid)
                        n = n->rb_left;
                else if (tid > req->r_tid)
                        n = n->rb_right;
                else
                        return req;
        }
        return NULL;
}

/*
 * Return the request with the smallest tid that is >= @tid, or NULL
 * if there is none.
 */
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
                    u64 tid)
{
        struct ceph_osd_request *req, *best = NULL;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid) {
                        best = req;   /* smallest tid >= @tid so far */
                        n = n->rb_left;
                } else if (tid > req->r_tid) {
                        n = n->rb_right;
                } else {
                        return req;
                }
        }
        return best;
}
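
/*
 * Example (illustrative): with pending tids {2, 5, 9},
 * __lookup_request_ge() returns 5 for tid 3..5, 9 for tid 6..9, and
 * NULL for tid 10 and up.  ceph_osdc_sync() below relies on this to
 * walk all in-flight tids without holding request_mutex across waits.
 */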


/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;

        if (!osd)
                return;
        dout("osd_reset osd%d\n", osd->o_osd);
        osdc = osd->o_osdc;
        osd->o_incarnation++;
        down_read(&osdc->map_sem);
        kick_requests(osdc, osd);
        up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
        struct ceph_osd *osd;

        osd = kzalloc(sizeof(*osd), GFP_NOFS);
        if (!osd)
                return NULL;

        atomic_set(&osd->o_ref, 1);
        osd->o_osdc = osdc;
        INIT_LIST_HEAD(&osd->o_requests);
        osd->o_incarnation = 1;

        ceph_con_init(osdc->client->msgr, &osd->o_con);
        osd->o_con.private = osd;
        osd->o_con.ops = &osd_con_ops;
        osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref))
                kfree(osd);
}

/*
 * remove an osd from our map
 */
static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("remove_osd %p\n", osd);
        BUG_ON(!list_empty(&osd->o_requests));
        rb_erase(&osd->o_node, &osdc->osds);
        ceph_con_close(&osd->o_con);
        put_osd(osd);
}

/*
 * reset osd connection
 */
static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        int ret = 0;

        dout("reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests)) {
                remove_osd(osdc, osd);
        } else {
                ceph_con_close(&osd->o_con);
                ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
                osd->o_incarnation++;
        }
        return ret;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
        struct rb_node **p = &osdc->osds.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;

        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
                if (new->o_osd < osd->o_osd)
                        p = &(*p)->rb_left;
                else if (new->o_osd > osd->o_osd)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->o_node, parent, p);
        rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
        struct ceph_osd *osd;
        struct rb_node *n = osdc->osds.rb_node;

        while (n) {
                osd = rb_entry(n, struct ceph_osd, o_node);
                if (o < osd->o_osd)
                        n = n->rb_left;
                else if (o > osd->o_osd)
                        n = n->rb_right;
                else
                        return osd;
        }
        return NULL;
}


/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void register_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *head = req->r_request->front.iov_base;

        mutex_lock(&osdc->request_mutex);
        req->r_tid = ++osdc->last_tid;
        head->tid = cpu_to_le64(req->r_tid);

        dout("register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;

        req->r_timeout_stamp =
                jiffies + osdc->client->mount_args->osd_timeout*HZ;

        if (osdc->num_requests == 1) {
                osdc->timeout_tid = req->r_tid;
                dout("  timeout on tid %llu at %lu\n", req->r_tid,
                     req->r_timeout_stamp);
                schedule_delayed_work(&osdc->timeout_work,
                      round_jiffies_relative(req->r_timeout_stamp - jiffies));
        }
        mutex_unlock(&osdc->request_mutex);
}
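
/*
 * Note (illustrative): with a mount option of, say, osd_timeout=60,
 * r_timeout_stamp is jiffies + 60*HZ.  The delayed work is armed only
 * when the first request is registered; handle_timeout() below
 * re-arms it for as long as requests remain in flight.
 */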

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
{
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &osdc->requests);
        osdc->num_requests--;

        if (req->r_osd) {
                /* make sure the original request isn't in flight. */
                ceph_con_revoke(&req->r_osd->o_con, req->r_request);

                list_del_init(&req->r_osd_item);
                if (list_empty(&req->r_osd->o_requests))
                        remove_osd(osdc, req->r_osd);
                req->r_osd = NULL;
        }

        ceph_osdc_put_request(req);

        if (req->r_tid == osdc->timeout_tid) {
                if (osdc->num_requests == 0) {
                        dout("no requests, canceling timeout\n");
                        osdc->timeout_tid = 0;
                        cancel_delayed_work(&osdc->timeout_work);
                } else {
                        req = rb_entry(rb_first(&osdc->requests),
                                       struct ceph_osd_request, r_node);
                        osdc->timeout_tid = req->r_tid;
                        dout("rescheduled timeout on tid %llu at %lu\n",
                             req->r_tid, req->r_timeout_stamp);
                        schedule_delayed_work(&osdc->timeout_work,
                              round_jiffies_relative(req->r_timeout_stamp -
                                                     jiffies));
                }
        }
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
        if (req->r_sent) {
                ceph_con_revoke(&req->r_osd->o_con, req->r_request);
                req->r_sent = 0;
        }
}

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_osds(struct ceph_osd_client *osdc,
                      struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        struct ceph_pg pgid;
        int o = -1;
        int err;
        struct ceph_osd *newosd = NULL;

        dout("map_osds %p tid %lld\n", req, req->r_tid);
        err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
                                      &req->r_file_layout, osdc->osdmap);
        if (err)
                return err;
        pgid = reqhead->layout.ol_pgid;
        o = ceph_calc_pg_primary(osdc->osdmap, pgid);

        if ((req->r_osd && req->r_osd->o_osd == o &&
             req->r_sent >= req->r_osd->o_incarnation) ||
            (req->r_osd == NULL && o == -1))
                return 0;  /* no change */

        dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n",
             req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
             req->r_osd ? req->r_osd->o_osd : -1);

        if (req->r_osd) {
                __cancel_request(req);
                list_del_init(&req->r_osd_item);
                if (list_empty(&req->r_osd->o_requests)) {
                        /* try to re-use r_osd if possible */
                        newosd = get_osd(req->r_osd);
                        remove_osd(osdc, newosd);
                }
                req->r_osd = NULL;
        }

        req->r_osd = __lookup_osd(osdc, o);
        if (!req->r_osd && o >= 0) {
                if (newosd) {
                        req->r_osd = newosd;
                        newosd = NULL;
                } else {
                        err = -ENOMEM;
                        req->r_osd = create_osd(osdc);
                        if (!req->r_osd)
                                goto out;
                }

                dout("map_osds osd %p is osd%d\n", req->r_osd, o);
                req->r_osd->o_osd = o;
                req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
                __insert_osd(osdc, req->r_osd);

                ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
        }

        if (req->r_osd)
                list_add(&req->r_osd_item, &req->r_osd->o_requests);
        err = 1;   /* osd changed */

out:
        if (newosd)
                put_osd(newosd);
        return err;
}
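
/*
 * Example (illustrative): if an osdmap update moves a pg's primary
 * from osd2 to osd7, __map_osds() drops the request from osd2's list
 * (recycling the now-idle osd2 struct for osd7 when possible), looks
 * up or creates the osd7 session, opens its connection, and returns 1
 * so the caller knows the request must be (re)sent.
 */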

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static int __send_request(struct ceph_osd_client *osdc,
                          struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead;
        int err;

        err = __map_osds(osdc, req);
        if (err < 0)
                return err;
        if (req->r_osd == NULL) {
                dout("send_request %p no up osds in pg\n", req);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
                return 0;
        }

        dout("send_request %p tid %llu to osd%d flags %d\n",
             req, req->r_tid, req->r_osd->o_osd, req->r_flags);

        reqhead = req->r_request->front.iov_base;
        reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
        reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
        reqhead->reassert_version = req->r_reassert_version;

        req->r_timeout_stamp = jiffies+osdc->client->mount_args->osd_timeout*HZ;

        ceph_msg_get(req->r_request); /* send consumes a ref */
        ceph_con_send(&req->r_osd->o_con, req->r_request);
        req->r_sent = req->r_osd->o_incarnation;
        return 0;
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests that have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in the future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_osd_request *req;
        struct ceph_osd *osd;
        unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
        unsigned long next_timeout = timeout + jiffies;
        struct rb_node *p;

        dout("timeout\n");
        down_read(&osdc->map_sem);

        ceph_monc_request_next_osdmap(&osdc->client->monc);

        mutex_lock(&osdc->request_mutex);
        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_osd_request, r_node);

                if (req->r_resend) {
                        int err;

                        dout("osdc resending prev failed %lld\n", req->r_tid);
                        err = __send_request(osdc, req);
                        if (err)
                                dout("osdc failed again on %lld\n", req->r_tid);
                        else
                                req->r_resend = false;
                        continue;
                }
        }
        for (p = rb_first(&osdc->osds); p; p = rb_next(p)) {
                osd = rb_entry(p, struct ceph_osd, o_node);
                if (list_empty(&osd->o_requests))
                        continue;
                req = list_first_entry(&osd->o_requests,
                                       struct ceph_osd_request, r_osd_item);
                if (time_before(jiffies, req->r_timeout_stamp))
                        continue;

                dout(" tid %llu (at least) timed out on osd%d\n",
                     req->r_tid, osd->o_osd);
                req->r_timeout_stamp = next_timeout;
                ceph_con_keepalive(&osd->o_con);
        }

        if (osdc->timeout_tid)
                schedule_delayed_work(&osdc->timeout_work,
                                      round_jiffies_relative(timeout));

        mutex_unlock(&osdc->request_mutex);

        up_read(&osdc->map_sem);
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        struct ceph_osd_reply_head *rhead = msg->front.iov_base;
        struct ceph_osd_request *req;
        u64 tid;
        int numops, object_len, flags;

        if (msg->front.iov_len < sizeof(*rhead))
                goto bad;
        tid = le64_to_cpu(rhead->tid);
        numops = le32_to_cpu(rhead->num_ops);
        object_len = le32_to_cpu(rhead->object_len);
        if (msg->front.iov_len != sizeof(*rhead) + object_len +
            numops * sizeof(struct ceph_osd_op))
                goto bad;
        dout("handle_reply %p tid %llu\n", msg, tid);

        /* lookup */
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (req == NULL) {
                dout("handle_reply tid %llu dne\n", tid);
                mutex_unlock(&osdc->request_mutex);
                return;
        }
        ceph_osdc_get_request(req);
        flags = le32_to_cpu(rhead->flags);

        if (req->r_reply) {
                /*
                 * once we see the message has been received, we don't
                 * need a ref (which is only needed for revoking
                 * pages)
                 */
                ceph_msg_put(req->r_reply);
                req->r_reply = NULL;
        }

        if (!req->r_got_reply) {
                unsigned bytes;

                req->r_result = le32_to_cpu(rhead->result);
                bytes = le32_to_cpu(msg->hdr.data_len);
                dout("handle_reply result %d bytes %d\n", req->r_result,
                     bytes);
                if (req->r_result == 0)
                        req->r_result = bytes;

                /* in case this is a write and we need to replay, */
                req->r_reassert_version = rhead->reassert_version;

                req->r_got_reply = 1;
        } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
                dout("handle_reply tid %llu dup ack\n", tid);
                /* don't leak request_mutex on this early exit */
                mutex_unlock(&osdc->request_mutex);
                goto done;
        }

        dout("handle_reply tid %llu flags %d\n", tid, flags);

        /* either this is a read, or we got the safe response */
        if ((flags & CEPH_OSD_FLAG_ONDISK) ||
            ((flags & CEPH_OSD_FLAG_WRITE) == 0))
                __unregister_request(osdc, req);

        mutex_unlock(&osdc->request_mutex);

        if (req->r_callback)
                req->r_callback(req, msg);
        else
                complete(&req->r_completion);

        if (flags & CEPH_OSD_FLAG_ONDISK) {
                if (req->r_safe_callback)
                        req->r_safe_callback(req, msg);
                complete(&req->r_safe_completion);  /* fsync waiter */
        }

done:
        ceph_osdc_put_request(req);
        return;

bad:
        pr_err("corrupt osd_op_reply got %d %d expected %d\n",
               (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
               (int)sizeof(*rhead));
}
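
/*
 * Example (illustrative): a write typically sees two replies.  The
 * first ack (no ONDISK flag) completes r_completion and wakes the
 * writer, but the request stays registered.  The later commit carrying
 * CEPH_OSD_FLAG_ONDISK unregisters the request and completes
 * r_safe_completion for fsync waiters.  Reads get a single reply and
 * are unregistered immediately.
 */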


/*
 * Resubmit osd requests whose osd or osd address has changed.  Request
 * a new osd map if osds are down, or we are otherwise unable to determine
 * how to direct a request.
 *
 * Close connections to down osds.
 *
 * If @kickosd is specified, resubmit requests for that specific osd.
 *
 * Caller should hold map_sem for read; request_mutex is taken here.
 */
static void kick_requests(struct ceph_osd_client *osdc,
                          struct ceph_osd *kickosd)
{
        struct ceph_osd_request *req;
        struct rb_node *p, *n;
        int needmap = 0;
        int err;

        dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
        mutex_lock(&osdc->request_mutex);
        if (!kickosd) {
                for (p = rb_first(&osdc->osds); p; p = n) {
                        struct ceph_osd *osd =
                                rb_entry(p, struct ceph_osd, o_node);

                        n = rb_next(p);
                        if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
                            !ceph_entity_addr_equal(&osd->o_con.peer_addr,
                                            ceph_osd_addr(osdc->osdmap,
                                                          osd->o_osd)))
                                reset_osd(osdc, osd);
                }
        }

        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_osd_request, r_node);

                if (req->r_resend) {
                        dout(" r_resend set on tid %llu\n", req->r_tid);
                        __cancel_request(req);
                        goto kick;
                }
                if (req->r_osd && kickosd == req->r_osd) {
                        __cancel_request(req);
                        goto kick;
                }

                err = __map_osds(osdc, req);
                if (err == 0)
                        continue;  /* no change */
                if (err < 0) {
                        /*
                         * FIXME: really, we should set the request
                         * error and fail if this isn't a 'nofail'
                         * request, but that's a fair bit more
                         * complicated to do.  So retry!
                         */
                        dout(" setting r_resend on %llu\n", req->r_tid);
                        req->r_resend = true;
                        continue;
                }
                if (req->r_osd == NULL) {
                        dout("tid %llu maps to no valid osd\n", req->r_tid);
                        needmap++;  /* request a newer map */
                        continue;
                }

kick:
                dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
                     req->r_osd->o_osd);
                req->r_flags |= CEPH_OSD_FLAG_RETRY;
                err = __send_request(osdc, req);
                if (err) {
                        dout(" setting r_resend on %llu\n", req->r_tid);
                        req->r_resend = true;
                }
        }
        mutex_unlock(&osdc->request_mutex);

        if (needmap) {
                dout("%d requests for down osds, need new map\n", needmap);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        }
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        void *p, *end, *next;
        u32 nr_maps, maplen;
        u32 epoch;
        struct ceph_osdmap *newmap = NULL, *oldmap;
        int err;
        struct ceph_fsid fsid;

        dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        /* verify fsid (map_sem is not held yet, so fail via bad_nolock) */
        ceph_decode_need(&p, end, sizeof(fsid), bad_nolock);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        if (osdc->client->monc.have_fsid) {
                if (ceph_fsid_compare(&fsid,
                                      &osdc->client->monc.monmap->fsid)) {
                        pr_err("got osdmap with wrong fsid, ignoring\n");
                        return;
                }
        } else {
                ceph_fsid_set(&osdc->client->monc.monmap->fsid, &fsid);
                osdc->client->monc.have_fsid = true;
        }

        down_write(&osdc->map_sem);

        /* incremental maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d inc maps\n", nr_maps);
        while (nr_maps > 0) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                next = p + maplen;
                if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
                        dout("applying incremental map %u len %d\n",
                             epoch, maplen);
                        newmap = osdmap_apply_incremental(&p, next,
                                                          osdc->osdmap,
                                                          osdc->client->msgr);
                        if (IS_ERR(newmap)) {
                                err = PTR_ERR(newmap);
                                goto bad;
                        }
                        if (newmap != osdc->osdmap) {
                                ceph_osdmap_destroy(osdc->osdmap);
                                osdc->osdmap = newmap;
                        }
                } else {
                        dout("ignoring incremental map %u len %d\n",
                             epoch, maplen);
                }
                p = next;
                nr_maps--;
        }
        if (newmap)
                goto done;

        /* full maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d full maps\n", nr_maps);
        while (nr_maps) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                if (nr_maps > 1) {
                        dout("skipping non-latest full map %u len %d\n",
                             epoch, maplen);
                } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
                        dout("skipping full map %u len %d, "
                             "older than our %u\n", epoch, maplen,
                             osdc->osdmap->epoch);
                } else {
                        dout("taking full map %u len %d\n", epoch, maplen);
                        newmap = osdmap_decode(&p, p+maplen);
                        if (IS_ERR(newmap)) {
                                err = PTR_ERR(newmap);
                                goto bad;
                        }
                        oldmap = osdc->osdmap;
                        osdc->osdmap = newmap;
                        if (oldmap)
                                ceph_osdmap_destroy(oldmap);
                }
                p += maplen;
                nr_maps--;
        }

done:
        downgrade_write(&osdc->map_sem);
        ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
        if (newmap)
                kick_requests(osdc, NULL);
        up_read(&osdc->map_sem);
        return;

bad_nolock:
        pr_err("osdc handle_map corrupt msg\n");
        return;

bad:
        pr_err("osdc handle_map corrupt msg\n");
        up_write(&osdc->map_sem);
        return;
}
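
/*
 * Wire format handled above (sketch, derived from the decode calls):
 *
 *   struct ceph_fsid fsid;
 *   u32 nr_inc;   then nr_inc of  { u32 epoch; u32 len; <len bytes> }
 *   u32 nr_full;  then nr_full of { u32 epoch; u32 len; <len bytes> }
 *
 * Only an incremental that advances our epoch by exactly one is
 * applied; otherwise we fall through to the newest full map, if any.
 */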


/*
 * A read request prepares specific pages that data is to be read into.
 * When a message is being read off the wire, we call prepare_pages to
 * find those pages.
 *
 * Returns 0 on success, -1 on failure.
 */
static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m,
                         int want)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;
        struct ceph_osd_reply_head *rhead = m->front.iov_base;
        struct ceph_osd_request *req;
        u64 tid;
        int ret = -1;
        int type = le16_to_cpu(m->hdr.type);

        if (!osd)
                return -1;
        osdc = osd->o_osdc;

        dout("prepare_pages on msg %p want %d\n", m, want);
        if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
                return -1;  /* hmm! */

        tid = le64_to_cpu(rhead->tid);
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (!req) {
                dout("prepare_pages unknown tid %llu\n", tid);
                goto out;
        }
        dout("prepare_pages tid %llu has %d pages, want %d\n",
             tid, req->r_num_pages, want);
        if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) {
                m->pages = req->r_pages;
                m->nr_pages = req->r_num_pages;
                req->r_reply = m;  /* only for duration of read over socket */
                ceph_msg_get(m);
                req->r_prepared_pages = 1;
                ret = 0; /* success */
        }
out:
        mutex_unlock(&osdc->request_mutex);
        return ret;
}
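
/*
 * Note (illustrative): this is the copy-avoiding read path.  When an
 * OPREPLY header arrives, the messenger asks prepare_pages() for the
 * destination pages before reading the data payload off the socket,
 * so read data lands directly in the pages supplied with the request
 * rather than in an intermediate buffer.
 */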

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                            struct ceph_osd_request *req,
                            bool nofail)
{
        int rc = 0;

        req->r_request->pages = req->r_pages;
        req->r_request->nr_pages = req->r_num_pages;

        register_request(osdc, req);

        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        /*
         * a racing kick_requests() may have sent the message for us
         * while we dropped request_mutex above, so only send now if
         * the request still hasn't been touched yet.
         */
        if (req->r_sent == 0) {
                rc = __send_request(osdc, req);
                if (rc) {
                        if (nofail) {
                                dout("osdc_start_request failed send, "
                                     "marking %lld\n", req->r_tid);
                                req->r_resend = true;
                                rc = 0;
                        } else {
                                __unregister_request(osdc, req);
                        }
                }
        }
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        return rc;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        int rc;

        rc = wait_for_completion_interruptible(&req->r_completion);
        if (rc < 0) {
                mutex_lock(&osdc->request_mutex);
                __cancel_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("wait_request tid %llu interrupted\n", req->r_tid);
                return rc;
        }

        dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
        return req->r_result;
}

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req;
        u64 last_tid, next_tid = 0;

        mutex_lock(&osdc->request_mutex);
        last_tid = osdc->last_tid;
        while (1) {
                req = __lookup_request_ge(osdc, next_tid);
                if (!req)
                        break;
                if (req->r_tid > last_tid)
                        break;

                next_tid = req->r_tid + 1;
                if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
                        continue;

                ceph_osdc_get_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("sync waiting on tid %llu (last is %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                mutex_lock(&osdc->request_mutex);
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
        dout("sync done (thru tid %llu)\n", last_tid);
}

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
        int err;

        dout("init\n");
        osdc->client = client;
        osdc->osdmap = NULL;
        init_rwsem(&osdc->map_sem);
        init_completion(&osdc->map_waiters);
        osdc->last_requested_map = 0;
        mutex_init(&osdc->request_mutex);
        osdc->timeout_tid = 0;
        osdc->last_tid = 0;
        osdc->osds = RB_ROOT;
        osdc->requests = RB_ROOT;
        osdc->num_requests = 0;
        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);

        err = -ENOMEM;
        osdc->req_mempool = mempool_create_kmalloc_pool(10,
                                        sizeof(struct ceph_osd_request));
        if (!osdc->req_mempool)
                goto out;

        err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true);
        if (err < 0)
                goto out_mempool;
        err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false);
        if (err < 0)
                goto out_msgpool;
        return 0;

out_msgpool:
        ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
        mempool_destroy(osdc->req_mempool);
out:
        return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        cancel_delayed_work_sync(&osdc->timeout_work);
        if (osdc->osdmap) {
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        u32 truncate_seq, u64 truncate_size,
                        struct page **pages, int num_pages)
{
        struct ceph_osd_request *req;
        int rc = 0;

        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
             vino.snap, off, *plen);
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, truncate_seq, truncate_size, NULL,
                                    false, 1);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short read due to an object boundary */
        req->r_pages = pages;
        num_pages = calc_pages_for(off, *plen);
        req->r_num_pages = num_pages;

        dout("readpages  final extent is %llu~%llu (%d pages)\n",
             off, *plen, req->r_num_pages);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        dout("readpages result %d\n", rc);
        return rc;
}

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         struct ceph_file_layout *layout,
                         struct ceph_snap_context *snapc,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec *mtime,
                         struct page **pages, int num_pages,
                         int flags, int do_sync, bool nofail)
{
        struct ceph_osd_request *req;
        int rc = 0;

        BUG_ON(vino.snap != CEPH_NOSNAP);
        req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                    CEPH_OSD_OP_WRITE,
                                    flags | CEPH_OSD_FLAG_ONDISK |
                                            CEPH_OSD_FLAG_WRITE,
                                    snapc, do_sync,
                                    truncate_seq, truncate_size, mtime,
                                    nofail, 1);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short write due to an object boundary */
        req->r_pages = pages;
        req->r_num_pages = calc_pages_for(off, len);
        dout("writepages %llu~%llu (%d pages)\n", off, len,
             req->r_num_pages);

        rc = ceph_osdc_start_request(osdc, req, nofail);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        if (rc == 0)
                rc = len;
        dout("writepages result %d\n", rc);
        return rc;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;
        int type = le16_to_cpu(msg->hdr.type);

        if (!osd)
                goto out;
        osdc = osd->o_osdc;

        switch (type) {
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(osdc, msg);
                break;
        case CEPH_MSG_OSD_OPREPLY:
                handle_reply(osdc, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
out:
        /* drop our msg reference even on the no-session path */
        ceph_msg_put(msg);
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        int type = le16_to_cpu(hdr->type);
        int front = le32_to_cpu(hdr->front_len);

        switch (type) {
        case CEPH_MSG_OSD_OPREPLY:
                return ceph_msgpool_get(&osdc->msgpool_op_reply, front);
        }
        return ceph_alloc_msg(con, hdr);
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
                return con;
        return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        put_osd(osd);
}

/*
 * authentication
 */
static int get_authorizer(struct ceph_connection *con,
                          void **buf, int *len, int *proto,
                          void **reply_buf, int *reply_len, int force_new)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
        int ret = 0;

        if (force_new && o->o_authorizer) {
                ac->ops->destroy_authorizer(ac, o->o_authorizer);
                o->o_authorizer = NULL;
        }
        if (o->o_authorizer == NULL) {
                ret = ac->ops->create_authorizer(
                        ac, CEPH_ENTITY_TYPE_OSD,
                        &o->o_authorizer,
                        &o->o_authorizer_buf,
                        &o->o_authorizer_buf_len,
                        &o->o_authorizer_reply_buf,
                        &o->o_authorizer_reply_buf_len);
                if (ret)
                        return ret;
        }

        *proto = ac->protocol;
        *buf = o->o_authorizer_buf;
        *len = o->o_authorizer_buf_len;
        *reply_buf = o->o_authorizer_reply_buf;
        *reply_len = o->o_authorizer_reply_buf_len;
        return 0;
}


static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
}
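
/*
 * Sketch of the handshake these hooks serve (illustrative): when the
 * messenger connects to an OSD it calls get_authorizer() and sends the
 * negotiated protocol id (ac->protocol) along with the authorizer
 * buffer; the OSD's response is checked by verify_authorizer_reply().
 * With the AUTH_NONE protocol added by this patch, the authorizer is
 * roughly trivial, identifying the client entity without any
 * cryptographic ticket.
 */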


static const struct ceph_connection_operations osd_con_ops = {
        .get = get_osd_con,
        .put = put_osd_con,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .verify_authorizer_reply = verify_authorizer_reply,
        .alloc_msg = alloc_msg,
        .fault = osd_reset,
        .alloc_middle = ceph_alloc_middle,
        .prepare_pages = prepare_pages,
};