osd_client.c

  1. #include <linux/ceph/ceph_debug.h>
  2. #include <linux/module.h>
  3. #include <linux/err.h>
  4. #include <linux/highmem.h>
  5. #include <linux/mm.h>
  6. #include <linux/pagemap.h>
  7. #include <linux/slab.h>
  8. #include <linux/uaccess.h>
  9. #ifdef CONFIG_BLOCK
  10. #include <linux/bio.h>
  11. #endif
  12. #include <linux/ceph/libceph.h>
  13. #include <linux/ceph/osd_client.h>
  14. #include <linux/ceph/messenger.h>
  15. #include <linux/ceph/decode.h>
  16. #include <linux/ceph/auth.h>
  17. #include <linux/ceph/pagelist.h>
  18. #define OSD_OP_FRONT_LEN 4096
  19. #define OSD_OPREPLY_FRONT_LEN 512
  20. static const struct ceph_connection_operations osd_con_ops;
  21. static void __send_queued(struct ceph_osd_client *osdc);
  22. static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
  23. static void __register_request(struct ceph_osd_client *osdc,
  24. struct ceph_osd_request *req);
  25. static void __unregister_linger_request(struct ceph_osd_client *osdc,
  26. struct ceph_osd_request *req);
  27. static void __send_request(struct ceph_osd_client *osdc,
  28. struct ceph_osd_request *req);
  29. static int op_has_extent(int op)
  30. {
  31. return (op == CEPH_OSD_OP_READ ||
  32. op == CEPH_OSD_OP_WRITE);
  33. }
  34. /*
  35. * Implement client access to distributed object storage cluster.
  36. *
  37. * All data objects are stored within a cluster/cloud of OSDs, or
  38. * "object storage devices." (Note that Ceph OSDs have _nothing_ to
  39. * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
  40. * remote daemons serving up and coordinating consistent and safe
  41. * access to storage.
  42. *
  43. * Cluster membership and the mapping of data objects onto storage devices
  44. * are described by the osd map.
  45. *
  46. * We keep track of pending OSD requests (read, write), resubmit
  47. * requests to different OSDs when the cluster topology/data layout
  48. * changes, or retry the affected requests when the communications
  49. * channel with an OSD is reset.
  50. */
  51. /*
  52. * calculate the mapping of a file extent onto an object, and fill out the
  53. * request accordingly. shorten extent as necessary if it crosses an
  54. * object boundary.
  55. *
  56. * fill osd op in request message.
  57. */
  58. static int calc_layout(struct ceph_vino vino,
  59. struct ceph_file_layout *layout,
  60. u64 off, u64 *plen,
  61. struct ceph_osd_request *req,
  62. struct ceph_osd_req_op *op)
  63. {
  64. u64 orig_len = *plen;
  65. u64 bno = 0;
  66. u64 objoff = 0;
  67. u64 objlen = 0;
  68. int r;
  69. /* object extent? */
  70. r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno,
  71. &objoff, &objlen);
  72. if (r < 0)
  73. return r;
  74. if (objlen < orig_len) {
  75. *plen = objlen;
  76. dout(" skipping last %llu, final file extent %llu~%llu\n",
  77. orig_len - *plen, off, *plen);
  78. }
  79. if (op_has_extent(op->op)) {
  80. u32 osize = le32_to_cpu(layout->fl_object_size);
  81. op->extent.offset = objoff;
  82. op->extent.length = objlen;
  83. if (op->extent.truncate_size <= off - objoff) {
  84. op->extent.truncate_size = 0;
  85. } else {
  86. op->extent.truncate_size -= off - objoff;
  87. if (op->extent.truncate_size > osize)
  88. op->extent.truncate_size = osize;
  89. }
  90. }
  91. req->r_num_pages = calc_pages_for(off, *plen);
  92. req->r_page_alignment = off & ~PAGE_MASK;
  93. if (op->op == CEPH_OSD_OP_WRITE)
  94. op->payload_len = *plen;
  95. dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
  96. bno, objoff, objlen, req->r_num_pages);
  97. snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
  98. req->r_oid_len = strlen(req->r_oid);
  99. return r;
  100. }
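/*
 * Worked example (illustrative only, assuming the simple default layout
 * where stripe unit == object size == 4 MB and stripe count == 1): a
 * 1 MB write at file offset 3.5 MB maps to bno 0 with objoff 3.5 MB and
 * objlen 0.5 MB, so *plen is trimmed to 0.5 MB and the caller must issue
 * the remaining 0.5 MB as a separate request.  With vino.ino == 0x1234
 * and bno == 5, the "%llx.%08llx" format above yields the object name
 * "1234.00000005".
 */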
  101. /*
  102. * requests
  103. */
  104. void ceph_osdc_release_request(struct kref *kref)
  105. {
  106. struct ceph_osd_request *req = container_of(kref,
  107. struct ceph_osd_request,
  108. r_kref);
  109. if (req->r_request)
  110. ceph_msg_put(req->r_request);
  111. if (req->r_con_filling_msg) {
  112. dout("%s revoking msg %p from con %p\n", __func__,
  113. req->r_reply, req->r_con_filling_msg);
  114. ceph_msg_revoke_incoming(req->r_reply);
  115. req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
  116. req->r_con_filling_msg = NULL;
  117. }
  118. if (req->r_reply)
  119. ceph_msg_put(req->r_reply);
  120. if (req->r_own_pages)
  121. ceph_release_page_vector(req->r_pages,
  122. req->r_num_pages);
  123. ceph_put_snap_context(req->r_snapc);
  124. ceph_pagelist_release(&req->r_trail);
  125. if (req->r_mempool)
  126. mempool_free(req, req->r_osdc->req_mempool);
  127. else
  128. kfree(req);
  129. }
  130. EXPORT_SYMBOL(ceph_osdc_release_request);
  131. struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
  132. struct ceph_snap_context *snapc,
  133. unsigned int num_op,
  134. bool use_mempool,
  135. gfp_t gfp_flags)
  136. {
  137. struct ceph_osd_request *req;
  138. struct ceph_msg *msg;
  139. size_t msg_size = sizeof(struct ceph_osd_request_head);
  140. msg_size += num_op*sizeof(struct ceph_osd_op);
  141. if (use_mempool) {
  142. req = mempool_alloc(osdc->req_mempool, gfp_flags);
  143. memset(req, 0, sizeof(*req));
  144. } else {
  145. req = kzalloc(sizeof(*req), gfp_flags);
  146. }
  147. if (req == NULL)
  148. return NULL;
  149. req->r_osdc = osdc;
  150. req->r_mempool = use_mempool;
  151. kref_init(&req->r_kref);
  152. init_completion(&req->r_completion);
  153. init_completion(&req->r_safe_completion);
  154. RB_CLEAR_NODE(&req->r_node);
  155. INIT_LIST_HEAD(&req->r_unsafe_item);
  156. INIT_LIST_HEAD(&req->r_linger_item);
  157. INIT_LIST_HEAD(&req->r_linger_osd);
  158. INIT_LIST_HEAD(&req->r_req_lru_item);
  159. INIT_LIST_HEAD(&req->r_osd_item);
  160. /* create reply message */
  161. if (use_mempool)
  162. msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
  163. else
  164. msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
  165. OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
  166. if (!msg) {
  167. ceph_osdc_put_request(req);
  168. return NULL;
  169. }
  170. req->r_reply = msg;
  171. ceph_pagelist_init(&req->r_trail);
  172. /* create request message; allow space for oid */
  173. msg_size += MAX_OBJ_NAME_SIZE;
  174. if (snapc)
  175. msg_size += sizeof(u64) * snapc->num_snaps;
  176. if (use_mempool)
  177. msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
  178. else
  179. msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
  180. if (!msg) {
  181. ceph_osdc_put_request(req);
  182. return NULL;
  183. }
  184. memset(msg->front.iov_base, 0, msg->front.iov_len);
  185. req->r_request = msg;
  186. return req;
  187. }
  188. EXPORT_SYMBOL(ceph_osdc_alloc_request);
  189. static void osd_req_encode_op(struct ceph_osd_request *req,
  190. struct ceph_osd_op *dst,
  191. struct ceph_osd_req_op *src)
  192. {
  193. dst->op = cpu_to_le16(src->op);
  194. switch (src->op) {
  195. case CEPH_OSD_OP_READ:
  196. case CEPH_OSD_OP_WRITE:
  197. dst->extent.offset =
  198. cpu_to_le64(src->extent.offset);
  199. dst->extent.length =
  200. cpu_to_le64(src->extent.length);
  201. dst->extent.truncate_size =
  202. cpu_to_le64(src->extent.truncate_size);
  203. dst->extent.truncate_seq =
  204. cpu_to_le32(src->extent.truncate_seq);
  205. break;
  206. case CEPH_OSD_OP_GETXATTR:
  207. case CEPH_OSD_OP_SETXATTR:
  208. case CEPH_OSD_OP_CMPXATTR:
  209. dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
  210. dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
  211. dst->xattr.cmp_op = src->xattr.cmp_op;
  212. dst->xattr.cmp_mode = src->xattr.cmp_mode;
  213. ceph_pagelist_append(&req->r_trail, src->xattr.name,
  214. src->xattr.name_len);
  215. ceph_pagelist_append(&req->r_trail, src->xattr.val,
  216. src->xattr.value_len);
  217. break;
  218. case CEPH_OSD_OP_CALL:
  219. dst->cls.class_len = src->cls.class_len;
  220. dst->cls.method_len = src->cls.method_len;
  221. dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
  222. ceph_pagelist_append(&req->r_trail, src->cls.class_name,
  223. src->cls.class_len);
  224. ceph_pagelist_append(&req->r_trail, src->cls.method_name,
  225. src->cls.method_len);
  226. ceph_pagelist_append(&req->r_trail, src->cls.indata,
  227. src->cls.indata_len);
  228. break;
  229. case CEPH_OSD_OP_ROLLBACK:
  230. dst->snap.snapid = cpu_to_le64(src->snap.snapid);
  231. break;
  232. case CEPH_OSD_OP_STARTSYNC:
  233. break;
  234. case CEPH_OSD_OP_NOTIFY:
  235. {
  236. __le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
  237. __le32 timeout = cpu_to_le32(src->watch.timeout);
  238. ceph_pagelist_append(&req->r_trail,
  239. &prot_ver, sizeof(prot_ver));
  240. ceph_pagelist_append(&req->r_trail,
  241. &timeout, sizeof(timeout));
  242. }
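/* fall through: NOTIFY also fills in the watch fields below */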
  243. case CEPH_OSD_OP_NOTIFY_ACK:
  244. case CEPH_OSD_OP_WATCH:
  245. dst->watch.cookie = cpu_to_le64(src->watch.cookie);
  246. dst->watch.ver = cpu_to_le64(src->watch.ver);
  247. dst->watch.flag = src->watch.flag;
  248. break;
  249. default:
  250. pr_err("unrecognized osd opcode %d\n", dst->op);
  251. WARN_ON(1);
  252. break;
  253. }
  254. dst->payload_len = cpu_to_le32(src->payload_len);
  255. }
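/*
 * Illustrative-only sketch of how a caller might fill a CEPH_OSD_OP_CALL
 * op before it reaches osd_req_encode_op() above; the class/method names
 * and the 'payload'/'payload_len' variables are hypothetical.
 *
 *	struct ceph_osd_req_op op = { .op = CEPH_OSD_OP_CALL };
 *
 *	op.cls.class_name = "lock";
 *	op.cls.class_len = strlen("lock");
 *	op.cls.method_name = "assert_locked";
 *	op.cls.method_len = strlen("assert_locked");
 *	op.cls.indata = payload;
 *	op.cls.indata_len = payload_len;
 *	op.payload_len = op.cls.class_len + op.cls.method_len +
 *			 op.cls.indata_len;
 */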
  256. /*
  257. * build new request AND message
  258. *
  259. */
  260. void ceph_osdc_build_request(struct ceph_osd_request *req,
  261. u64 off, u64 len, unsigned int num_op,
  262. struct ceph_osd_req_op *src_ops,
  263. struct ceph_snap_context *snapc, u64 snap_id,
  264. struct timespec *mtime)
  265. {
  266. struct ceph_msg *msg = req->r_request;
  267. struct ceph_osd_request_head *head;
  268. struct ceph_osd_req_op *src_op;
  269. struct ceph_osd_op *op;
  270. void *p;
  271. size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
  272. int flags = req->r_flags;
  273. u64 data_len = 0;
  274. int i;
  275. WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
  276. head = msg->front.iov_base;
  277. head->snapid = cpu_to_le64(snap_id);
  278. op = (void *)(head + 1);
  279. p = (void *)(op + num_op);
  280. req->r_snapc = ceph_get_snap_context(snapc);
  281. head->client_inc = cpu_to_le32(1); /* always, for now. */
  282. head->flags = cpu_to_le32(flags);
  283. if (flags & CEPH_OSD_FLAG_WRITE)
  284. ceph_encode_timespec(&head->mtime, mtime);
  285. BUG_ON(num_op > (unsigned int) ((u16) -1));
  286. head->num_ops = cpu_to_le16(num_op);
  287. /* fill in oid */
  288. head->object_len = cpu_to_le32(req->r_oid_len);
  289. memcpy(p, req->r_oid, req->r_oid_len);
  290. p += req->r_oid_len;
  291. src_op = src_ops;
  292. while (num_op--)
  293. osd_req_encode_op(req, op++, src_op++);
  294. data_len += req->r_trail.length;
  295. if (snapc) {
  296. head->snap_seq = cpu_to_le64(snapc->seq);
  297. head->num_snaps = cpu_to_le32(snapc->num_snaps);
  298. for (i = 0; i < snapc->num_snaps; i++) {
  299. put_unaligned_le64(snapc->snaps[i], p);
  300. p += sizeof(u64);
  301. }
  302. }
  303. if (flags & CEPH_OSD_FLAG_WRITE) {
  304. req->r_request->hdr.data_off = cpu_to_le16(off);
  305. req->r_request->hdr.data_len = cpu_to_le32(len + data_len);
  306. } else if (data_len) {
  307. req->r_request->hdr.data_off = 0;
  308. req->r_request->hdr.data_len = cpu_to_le32(data_len);
  309. }
  310. req->r_request->page_alignment = req->r_page_alignment;
  311. BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
  312. msg_size = p - msg->front.iov_base;
  313. msg->front.iov_len = msg_size;
  314. msg->hdr.front_len = cpu_to_le32(msg_size);
  315. return;
  316. }
  317. EXPORT_SYMBOL(ceph_osdc_build_request);
  318. /*
  319. * build new request AND message, calculate layout, and adjust file
  320. * extent as needed.
  321. *
  322. * if the file was recently truncated, we include information about its
  323. * old and new size so that the object can be updated appropriately. (we
  324. * avoid synchronously deleting truncated objects because it's slow.)
  325. *
  326. * if @do_sync, include a 'startsync' command so that the osd will flush
  327. * data quickly.
  328. */
  329. struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
  330. struct ceph_file_layout *layout,
  331. struct ceph_vino vino,
  332. u64 off, u64 *plen,
  333. int opcode, int flags,
  334. struct ceph_snap_context *snapc,
  335. int do_sync,
  336. u32 truncate_seq,
  337. u64 truncate_size,
  338. struct timespec *mtime,
  339. bool use_mempool,
  340. int page_align)
  341. {
  342. struct ceph_osd_req_op ops[2];
  343. struct ceph_osd_request *req;
  344. unsigned int num_op = 1;
  345. int r;
  346. memset(&ops, 0, sizeof ops);
  347. ops[0].op = opcode;
  348. ops[0].extent.truncate_seq = truncate_seq;
  349. ops[0].extent.truncate_size = truncate_size;
  350. if (do_sync) {
  351. ops[1].op = CEPH_OSD_OP_STARTSYNC;
  352. num_op++;
  353. }
  354. req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
  355. GFP_NOFS);
  356. if (!req)
  357. return ERR_PTR(-ENOMEM);
  358. req->r_flags = flags;
  359. /* calculate max write size */
  360. r = calc_layout(vino, layout, off, plen, req, ops);
  361. if (r < 0)
  362. return ERR_PTR(r);
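/* NOTE: on this error path the freshly allocated req is not released;
 * strictly, a ceph_osdc_put_request(req) would be needed before
 * returning to avoid leaking the request when calc_layout() fails. */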
  363. req->r_file_layout = *layout; /* keep a copy */
  364. /* in case it differs from natural (file) alignment that
  365. calc_layout filled in for us */
  366. req->r_num_pages = calc_pages_for(page_align, *plen);
  367. req->r_page_alignment = page_align;
  368. ceph_osdc_build_request(req, off, *plen, num_op, ops,
  369. snapc, vino.snap, mtime);
  370. return req;
  371. }
  372. EXPORT_SYMBOL(ceph_osdc_new_request);
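/*
 * Illustrative-only usage sketch (not part of this file's code): roughly
 * how a caller such as the ceph filesystem might drive a single read
 * using the request API above together with the start/wait helpers
 * defined later in this file.  The caller supplies its page vector via
 * req->r_pages before starting the request.  'layout', 'vino', 'pages',
 * 'truncate_seq', 'truncate_size', 'page_align' and the 4096-byte length
 * are hypothetical placeholders.
 *
 *	u64 len = 4096;
 *	struct ceph_osd_request *req;
 *	int ret;
 *
 *	req = ceph_osdc_new_request(osdc, &layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false, page_align);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_pages = pages;
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *	return ret;
 */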
  373. /*
  374. * We keep osd requests in an rbtree, sorted by ->r_tid.
  375. */
  376. static void __insert_request(struct ceph_osd_client *osdc,
  377. struct ceph_osd_request *new)
  378. {
  379. struct rb_node **p = &osdc->requests.rb_node;
  380. struct rb_node *parent = NULL;
  381. struct ceph_osd_request *req = NULL;
  382. while (*p) {
  383. parent = *p;
  384. req = rb_entry(parent, struct ceph_osd_request, r_node);
  385. if (new->r_tid < req->r_tid)
  386. p = &(*p)->rb_left;
  387. else if (new->r_tid > req->r_tid)
  388. p = &(*p)->rb_right;
  389. else
  390. BUG();
  391. }
  392. rb_link_node(&new->r_node, parent, p);
  393. rb_insert_color(&new->r_node, &osdc->requests);
  394. }
  395. static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
  396. u64 tid)
  397. {
  398. struct ceph_osd_request *req;
  399. struct rb_node *n = osdc->requests.rb_node;
  400. while (n) {
  401. req = rb_entry(n, struct ceph_osd_request, r_node);
  402. if (tid < req->r_tid)
  403. n = n->rb_left;
  404. else if (tid > req->r_tid)
  405. n = n->rb_right;
  406. else
  407. return req;
  408. }
  409. return NULL;
  410. }
  411. static struct ceph_osd_request *
  412. __lookup_request_ge(struct ceph_osd_client *osdc,
  413. u64 tid)
  414. {
  415. struct ceph_osd_request *req;
  416. struct rb_node *n = osdc->requests.rb_node;
  417. while (n) {
  418. req = rb_entry(n, struct ceph_osd_request, r_node);
  419. if (tid < req->r_tid) {
  420. if (!n->rb_left)
  421. return req;
  422. n = n->rb_left;
  423. } else if (tid > req->r_tid) {
  424. n = n->rb_right;
  425. } else {
  426. return req;
  427. }
  428. }
  429. return NULL;
  430. }
  431. /*
  432. * Resubmit requests pending on the given osd.
  433. */
  434. static void __kick_osd_requests(struct ceph_osd_client *osdc,
  435. struct ceph_osd *osd)
  436. {
  437. struct ceph_osd_request *req, *nreq;
  438. int err;
  439. dout("__kick_osd_requests osd%d\n", osd->o_osd);
  440. err = __reset_osd(osdc, osd);
  441. if (err)
  442. return;
  443. list_for_each_entry(req, &osd->o_requests, r_osd_item) {
  444. list_move(&req->r_req_lru_item, &osdc->req_unsent);
  445. dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
  446. osd->o_osd);
  447. if (!req->r_linger)
  448. req->r_flags |= CEPH_OSD_FLAG_RETRY;
  449. }
  450. list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
  451. r_linger_osd) {
  452. /*
  453. * reregister request prior to unregistering linger so
  454. * that r_osd is preserved.
  455. */
  456. BUG_ON(!list_empty(&req->r_req_lru_item));
  457. __register_request(osdc, req);
  458. list_add(&req->r_req_lru_item, &osdc->req_unsent);
  459. list_add(&req->r_osd_item, &req->r_osd->o_requests);
  460. __unregister_linger_request(osdc, req);
  461. dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
  462. osd->o_osd);
  463. }
  464. }
  465. /*
  466. * If the osd connection drops, we need to resubmit all requests.
  467. */
  468. static void osd_reset(struct ceph_connection *con)
  469. {
  470. struct ceph_osd *osd = con->private;
  471. struct ceph_osd_client *osdc;
  472. if (!osd)
  473. return;
  474. dout("osd_reset osd%d\n", osd->o_osd);
  475. osdc = osd->o_osdc;
  476. down_read(&osdc->map_sem);
  477. mutex_lock(&osdc->request_mutex);
  478. __kick_osd_requests(osdc, osd);
  479. __send_queued(osdc);
  480. mutex_unlock(&osdc->request_mutex);
  481. up_read(&osdc->map_sem);
  482. }
  483. /*
  484. * Track open sessions with osds.
  485. */
  486. static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
  487. {
  488. struct ceph_osd *osd;
  489. osd = kzalloc(sizeof(*osd), GFP_NOFS);
  490. if (!osd)
  491. return NULL;
  492. atomic_set(&osd->o_ref, 1);
  493. osd->o_osdc = osdc;
  494. osd->o_osd = onum;
  495. RB_CLEAR_NODE(&osd->o_node);
  496. INIT_LIST_HEAD(&osd->o_requests);
  497. INIT_LIST_HEAD(&osd->o_linger_requests);
  498. INIT_LIST_HEAD(&osd->o_osd_lru);
  499. osd->o_incarnation = 1;
  500. ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
  501. INIT_LIST_HEAD(&osd->o_keepalive_item);
  502. return osd;
  503. }
  504. static struct ceph_osd *get_osd(struct ceph_osd *osd)
  505. {
  506. if (atomic_inc_not_zero(&osd->o_ref)) {
  507. dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
  508. atomic_read(&osd->o_ref));
  509. return osd;
  510. } else {
  511. dout("get_osd %p FAIL\n", osd);
  512. return NULL;
  513. }
  514. }
  515. static void put_osd(struct ceph_osd *osd)
  516. {
  517. dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
  518. atomic_read(&osd->o_ref) - 1);
  519. if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
  520. struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
  521. if (ac->ops && ac->ops->destroy_authorizer)
  522. ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
  523. kfree(osd);
  524. }
  525. }
  526. /*
  527. * remove an osd from our map
  528. */
  529. static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
  530. {
  531. dout("__remove_osd %p\n", osd);
  532. BUG_ON(!list_empty(&osd->o_requests));
  533. rb_erase(&osd->o_node, &osdc->osds);
  534. list_del_init(&osd->o_osd_lru);
  535. ceph_con_close(&osd->o_con);
  536. put_osd(osd);
  537. }
  538. static void remove_all_osds(struct ceph_osd_client *osdc)
  539. {
  540. dout("%s %p\n", __func__, osdc);
  541. mutex_lock(&osdc->request_mutex);
  542. while (!RB_EMPTY_ROOT(&osdc->osds)) {
  543. struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
  544. struct ceph_osd, o_node);
  545. __remove_osd(osdc, osd);
  546. }
  547. mutex_unlock(&osdc->request_mutex);
  548. }
  549. static void __move_osd_to_lru(struct ceph_osd_client *osdc,
  550. struct ceph_osd *osd)
  551. {
  552. dout("__move_osd_to_lru %p\n", osd);
  553. BUG_ON(!list_empty(&osd->o_osd_lru));
  554. list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
  555. osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
  556. }
  557. static void __remove_osd_from_lru(struct ceph_osd *osd)
  558. {
  559. dout("__remove_osd_from_lru %p\n", osd);
  560. if (!list_empty(&osd->o_osd_lru))
  561. list_del_init(&osd->o_osd_lru);
  562. }
  563. static void remove_old_osds(struct ceph_osd_client *osdc)
  564. {
  565. struct ceph_osd *osd, *nosd;
  566. dout("__remove_old_osds %p\n", osdc);
  567. mutex_lock(&osdc->request_mutex);
  568. list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
  569. if (time_before(jiffies, osd->lru_ttl))
  570. break;
  571. __remove_osd(osdc, osd);
  572. }
  573. mutex_unlock(&osdc->request_mutex);
  574. }
  575. /*
  576. * reset osd connect
  577. */
  578. static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
  579. {
  580. struct ceph_entity_addr *peer_addr;
  581. dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
  582. if (list_empty(&osd->o_requests) &&
  583. list_empty(&osd->o_linger_requests)) {
  584. __remove_osd(osdc, osd);
  585. return -ENODEV;
  586. }
  587. peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
  588. if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
  589. !ceph_con_opened(&osd->o_con)) {
  590. struct ceph_osd_request *req;
  591. dout(" osd addr hasn't changed and connection never opened,"
  592. " letting msgr retry");
  593. /* touch each r_stamp for handle_timeout()'s benefit */
  594. list_for_each_entry(req, &osd->o_requests, r_osd_item)
  595. req->r_stamp = jiffies;
  596. return -EAGAIN;
  597. }
  598. ceph_con_close(&osd->o_con);
  599. ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
  600. osd->o_incarnation++;
  601. return 0;
  602. }
  603. static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
  604. {
  605. struct rb_node **p = &osdc->osds.rb_node;
  606. struct rb_node *parent = NULL;
  607. struct ceph_osd *osd = NULL;
  608. dout("__insert_osd %p osd%d\n", new, new->o_osd);
  609. while (*p) {
  610. parent = *p;
  611. osd = rb_entry(parent, struct ceph_osd, o_node);
  612. if (new->o_osd < osd->o_osd)
  613. p = &(*p)->rb_left;
  614. else if (new->o_osd > osd->o_osd)
  615. p = &(*p)->rb_right;
  616. else
  617. BUG();
  618. }
  619. rb_link_node(&new->o_node, parent, p);
  620. rb_insert_color(&new->o_node, &osdc->osds);
  621. }
  622. static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
  623. {
  624. struct ceph_osd *osd;
  625. struct rb_node *n = osdc->osds.rb_node;
  626. while (n) {
  627. osd = rb_entry(n, struct ceph_osd, o_node);
  628. if (o < osd->o_osd)
  629. n = n->rb_left;
  630. else if (o > osd->o_osd)
  631. n = n->rb_right;
  632. else
  633. return osd;
  634. }
  635. return NULL;
  636. }
  637. static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
  638. {
  639. schedule_delayed_work(&osdc->timeout_work,
  640. osdc->client->options->osd_keepalive_timeout * HZ);
  641. }
  642. static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
  643. {
  644. cancel_delayed_work(&osdc->timeout_work);
  645. }
  646. /*
  647. * Register request, assign tid. If this is the first request, set up
  648. * the timeout event.
  649. */
  650. static void __register_request(struct ceph_osd_client *osdc,
  651. struct ceph_osd_request *req)
  652. {
  653. req->r_tid = ++osdc->last_tid;
  654. req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
  655. dout("__register_request %p tid %lld\n", req, req->r_tid);
  656. __insert_request(osdc, req);
  657. ceph_osdc_get_request(req);
  658. osdc->num_requests++;
  659. if (osdc->num_requests == 1) {
  660. dout(" first request, scheduling timeout\n");
  661. __schedule_osd_timeout(osdc);
  662. }
  663. }
  664. static void register_request(struct ceph_osd_client *osdc,
  665. struct ceph_osd_request *req)
  666. {
  667. mutex_lock(&osdc->request_mutex);
  668. __register_request(osdc, req);
  669. mutex_unlock(&osdc->request_mutex);
  670. }
  671. /*
  672. * called under osdc->request_mutex
  673. */
  674. static void __unregister_request(struct ceph_osd_client *osdc,
  675. struct ceph_osd_request *req)
  676. {
  677. if (RB_EMPTY_NODE(&req->r_node)) {
  678. dout("__unregister_request %p tid %lld not registered\n",
  679. req, req->r_tid);
  680. return;
  681. }
  682. dout("__unregister_request %p tid %lld\n", req, req->r_tid);
  683. rb_erase(&req->r_node, &osdc->requests);
  684. osdc->num_requests--;
  685. if (req->r_osd) {
  686. /* make sure the original request isn't in flight. */
  687. ceph_msg_revoke(req->r_request);
  688. list_del_init(&req->r_osd_item);
  689. if (list_empty(&req->r_osd->o_requests) &&
  690. list_empty(&req->r_osd->o_linger_requests)) {
  691. dout("moving osd to %p lru\n", req->r_osd);
  692. __move_osd_to_lru(osdc, req->r_osd);
  693. }
  694. if (list_empty(&req->r_linger_item))
  695. req->r_osd = NULL;
  696. }
  697. list_del_init(&req->r_req_lru_item);
  698. ceph_osdc_put_request(req);
  699. if (osdc->num_requests == 0) {
  700. dout(" no requests, canceling timeout\n");
  701. __cancel_osd_timeout(osdc);
  702. }
  703. }
  704. /*
  705. * Cancel a previously queued request message
  706. */
  707. static void __cancel_request(struct ceph_osd_request *req)
  708. {
  709. if (req->r_sent && req->r_osd) {
  710. ceph_msg_revoke(req->r_request);
  711. req->r_sent = 0;
  712. }
  713. }
  714. static void __register_linger_request(struct ceph_osd_client *osdc,
  715. struct ceph_osd_request *req)
  716. {
  717. dout("__register_linger_request %p\n", req);
  718. list_add_tail(&req->r_linger_item, &osdc->req_linger);
  719. if (req->r_osd)
  720. list_add_tail(&req->r_linger_osd,
  721. &req->r_osd->o_linger_requests);
  722. }
  723. static void __unregister_linger_request(struct ceph_osd_client *osdc,
  724. struct ceph_osd_request *req)
  725. {
  726. dout("__unregister_linger_request %p\n", req);
  727. list_del_init(&req->r_linger_item);
  728. if (req->r_osd) {
  729. list_del_init(&req->r_linger_osd);
  730. if (list_empty(&req->r_osd->o_requests) &&
  731. list_empty(&req->r_osd->o_linger_requests)) {
  732. dout("moving osd to %p lru\n", req->r_osd);
  733. __move_osd_to_lru(osdc, req->r_osd);
  734. }
  735. if (list_empty(&req->r_osd_item))
  736. req->r_osd = NULL;
  737. }
  738. }
  739. void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
  740. struct ceph_osd_request *req)
  741. {
  742. mutex_lock(&osdc->request_mutex);
  743. if (req->r_linger) {
  744. __unregister_linger_request(osdc, req);
  745. ceph_osdc_put_request(req);
  746. }
  747. mutex_unlock(&osdc->request_mutex);
  748. }
  749. EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
  750. void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
  751. struct ceph_osd_request *req)
  752. {
  753. if (!req->r_linger) {
  754. dout("set_request_linger %p\n", req);
  755. req->r_linger = 1;
  756. /*
  757. * caller is now responsible for calling
  758. * unregister_linger_request
  759. */
  760. ceph_osdc_get_request(req);
  761. }
  762. }
  763. EXPORT_SYMBOL(ceph_osdc_set_request_linger);
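/*
 * Illustrative-only: a watcher (such as rbd) marks its watch request as
 * lingering *before* starting it, so that the osd client keeps resending
 * it across osdmap changes, and drops it explicitly when the watch is
 * torn down:
 *
 *	ceph_osdc_set_request_linger(osdc, req);
 *	ret = ceph_osdc_start_request(osdc, req, true);
 *	...
 *	ceph_osdc_unregister_linger_request(osdc, req);
 */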
  764. /*
  765. * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
  766. * (as needed), and set the request r_osd appropriately. If there is
  767. * no up osd, set r_osd to NULL. Move the request to the appropriate list
  768. * (unsent, homeless) or leave on in-flight lru.
  769. *
  770. * Return 0 if unchanged, 1 if changed, or negative on error.
  771. *
  772. * Caller should hold map_sem for read and request_mutex.
  773. */
  774. static int __map_request(struct ceph_osd_client *osdc,
  775. struct ceph_osd_request *req, int force_resend)
  776. {
  777. struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
  778. struct ceph_pg pgid;
  779. int acting[CEPH_PG_MAX_SIZE];
  780. int o = -1, num = 0;
  781. int err;
  782. dout("map_request %p tid %lld\n", req, req->r_tid);
  783. err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
  784. &req->r_file_layout, osdc->osdmap);
  785. if (err) {
  786. list_move(&req->r_req_lru_item, &osdc->req_notarget);
  787. return err;
  788. }
  789. pgid = reqhead->layout.ol_pgid;
  790. req->r_pgid = pgid;
  791. err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
  792. if (err > 0) {
  793. o = acting[0];
  794. num = err;
  795. }
  796. if ((!force_resend &&
  797. req->r_osd && req->r_osd->o_osd == o &&
  798. req->r_sent >= req->r_osd->o_incarnation &&
  799. req->r_num_pg_osds == num &&
  800. memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
  801. (req->r_osd == NULL && o == -1))
  802. return 0; /* no change */
  803. dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
  804. req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
  805. req->r_osd ? req->r_osd->o_osd : -1);
  806. /* record full pg acting set */
  807. memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
  808. req->r_num_pg_osds = num;
  809. if (req->r_osd) {
  810. __cancel_request(req);
  811. list_del_init(&req->r_osd_item);
  812. req->r_osd = NULL;
  813. }
  814. req->r_osd = __lookup_osd(osdc, o);
  815. if (!req->r_osd && o >= 0) {
  816. err = -ENOMEM;
  817. req->r_osd = create_osd(osdc, o);
  818. if (!req->r_osd) {
  819. list_move(&req->r_req_lru_item, &osdc->req_notarget);
  820. goto out;
  821. }
  822. dout("map_request osd %p is osd%d\n", req->r_osd, o);
  823. __insert_osd(osdc, req->r_osd);
  824. ceph_con_open(&req->r_osd->o_con,
  825. CEPH_ENTITY_TYPE_OSD, o,
  826. &osdc->osdmap->osd_addr[o]);
  827. }
  828. if (req->r_osd) {
  829. __remove_osd_from_lru(req->r_osd);
  830. list_add(&req->r_osd_item, &req->r_osd->o_requests);
  831. list_move(&req->r_req_lru_item, &osdc->req_unsent);
  832. } else {
  833. list_move(&req->r_req_lru_item, &osdc->req_notarget);
  834. }
  835. err = 1; /* osd or pg changed */
  836. out:
  837. return err;
  838. }
  839. /*
  840. * caller should hold map_sem (for read) and request_mutex
  841. */
  842. static void __send_request(struct ceph_osd_client *osdc,
  843. struct ceph_osd_request *req)
  844. {
  845. struct ceph_osd_request_head *reqhead;
  846. dout("send_request %p tid %llu to osd%d flags %d\n",
  847. req, req->r_tid, req->r_osd->o_osd, req->r_flags);
  848. reqhead = req->r_request->front.iov_base;
  849. reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
  850. reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
  851. reqhead->reassert_version = req->r_reassert_version;
  852. req->r_stamp = jiffies;
  853. list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
  854. ceph_msg_get(req->r_request); /* send consumes a ref */
  855. ceph_con_send(&req->r_osd->o_con, req->r_request);
  856. req->r_sent = req->r_osd->o_incarnation;
  857. }
  858. /*
  859. * Send any requests in the queue (req_unsent).
  860. */
  861. static void __send_queued(struct ceph_osd_client *osdc)
  862. {
  863. struct ceph_osd_request *req, *tmp;
  864. dout("__send_queued\n");
  865. list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
  866. __send_request(osdc, req);
  867. }
  868. /*
  869. * Timeout callback, called every N seconds when 1 or more osd
  870. * requests have been active for more than N seconds. When this
  871. * happens, we ping all OSDs whose requests have timed out to
  872. * ensure any communications channel reset is detected. Reset the
  873. * request timeouts another N seconds in the future as we go.
  874. * Reschedule the timeout event another N seconds in the future (unless
  875. * there are no open requests).
  876. */
  877. static void handle_timeout(struct work_struct *work)
  878. {
  879. struct ceph_osd_client *osdc =
  880. container_of(work, struct ceph_osd_client, timeout_work.work);
  881. struct ceph_osd_request *req;
  882. struct ceph_osd *osd;
  883. unsigned long keepalive =
  884. osdc->client->options->osd_keepalive_timeout * HZ;
  885. struct list_head slow_osds;
  886. dout("timeout\n");
  887. down_read(&osdc->map_sem);
  888. ceph_monc_request_next_osdmap(&osdc->client->monc);
  889. mutex_lock(&osdc->request_mutex);
  890. /*
  891. * ping osds that are a bit slow. this ensures that if there
  892. * is a break in the TCP connection we will notice, and reopen
  893. * a connection with that osd (from the fault callback).
  894. */
  895. INIT_LIST_HEAD(&slow_osds);
  896. list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
  897. if (time_before(jiffies, req->r_stamp + keepalive))
  898. break;
  899. osd = req->r_osd;
  900. BUG_ON(!osd);
  901. dout(" tid %llu is slow, will send keepalive on osd%d\n",
  902. req->r_tid, osd->o_osd);
  903. list_move_tail(&osd->o_keepalive_item, &slow_osds);
  904. }
  905. while (!list_empty(&slow_osds)) {
  906. osd = list_entry(slow_osds.next, struct ceph_osd,
  907. o_keepalive_item);
  908. list_del_init(&osd->o_keepalive_item);
  909. ceph_con_keepalive(&osd->o_con);
  910. }
  911. __schedule_osd_timeout(osdc);
  912. __send_queued(osdc);
  913. mutex_unlock(&osdc->request_mutex);
  914. up_read(&osdc->map_sem);
  915. }
  916. static void handle_osds_timeout(struct work_struct *work)
  917. {
  918. struct ceph_osd_client *osdc =
  919. container_of(work, struct ceph_osd_client,
  920. osds_timeout_work.work);
  921. unsigned long delay =
  922. osdc->client->options->osd_idle_ttl * HZ >> 2;
  923. dout("osds timeout\n");
  924. down_read(&osdc->map_sem);
  925. remove_old_osds(osdc);
  926. up_read(&osdc->map_sem);
  927. schedule_delayed_work(&osdc->osds_timeout_work,
  928. round_jiffies_relative(delay));
  929. }
  930. static void complete_request(struct ceph_osd_request *req)
  931. {
  932. if (req->r_safe_callback)
  933. req->r_safe_callback(req, NULL);
  934. complete_all(&req->r_safe_completion); /* fsync waiter */
  935. }
  936. /*
  937. * handle osd op reply. either call the callback if it is specified,
  938. * or do the completion to wake up the waiting thread.
  939. */
  940. static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
  941. struct ceph_connection *con)
  942. {
  943. struct ceph_osd_reply_head *rhead = msg->front.iov_base;
  944. struct ceph_osd_request *req;
  945. u64 tid;
  946. int numops, object_len, flags;
  947. s32 result;
  948. tid = le64_to_cpu(msg->hdr.tid);
  949. if (msg->front.iov_len < sizeof(*rhead))
  950. goto bad;
  951. numops = le32_to_cpu(rhead->num_ops);
  952. object_len = le32_to_cpu(rhead->object_len);
  953. result = le32_to_cpu(rhead->result);
  954. if (msg->front.iov_len != sizeof(*rhead) + object_len +
  955. numops * sizeof(struct ceph_osd_op))
  956. goto bad;
  957. dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
  958. /* lookup */
  959. mutex_lock(&osdc->request_mutex);
  960. req = __lookup_request(osdc, tid);
  961. if (req == NULL) {
  962. dout("handle_reply tid %llu dne\n", tid);
  963. mutex_unlock(&osdc->request_mutex);
  964. return;
  965. }
  966. ceph_osdc_get_request(req);
  967. flags = le32_to_cpu(rhead->flags);
  968. /*
  969. * if this connection filled our message, drop our reference now, to
  970. * avoid a (safe but slower) revoke later.
  971. */
  972. if (req->r_con_filling_msg == con && req->r_reply == msg) {
  973. dout(" dropping con_filling_msg ref %p\n", con);
  974. req->r_con_filling_msg = NULL;
  975. con->ops->put(con);
  976. }
  977. if (!req->r_got_reply) {
  978. unsigned int bytes;
  979. req->r_result = le32_to_cpu(rhead->result);
  980. bytes = le32_to_cpu(msg->hdr.data_len);
  981. dout("handle_reply result %d bytes %d\n", req->r_result,
  982. bytes);
  983. if (req->r_result == 0)
  984. req->r_result = bytes;
  985. /* in case this is a write and we need to replay, */
  986. req->r_reassert_version = rhead->reassert_version;
  987. req->r_got_reply = 1;
  988. } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
  989. dout("handle_reply tid %llu dup ack\n", tid);
  990. mutex_unlock(&osdc->request_mutex);
  991. goto done;
  992. }
  993. dout("handle_reply tid %llu flags %d\n", tid, flags);
  994. if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
  995. __register_linger_request(osdc, req);
  996. /* either this is a read, or we got the safe response */
  997. if (result < 0 ||
  998. (flags & CEPH_OSD_FLAG_ONDISK) ||
  999. ((flags & CEPH_OSD_FLAG_WRITE) == 0))
  1000. __unregister_request(osdc, req);
  1001. mutex_unlock(&osdc->request_mutex);
  1002. if (req->r_callback)
  1003. req->r_callback(req, msg);
  1004. else
  1005. complete_all(&req->r_completion);
  1006. if (flags & CEPH_OSD_FLAG_ONDISK)
  1007. complete_request(req);
  1008. done:
  1009. dout("req=%p req->r_linger=%d\n", req, req->r_linger);
  1010. ceph_osdc_put_request(req);
  1011. return;
  1012. bad:
  1013. pr_err("corrupt osd_op_reply got %d %d expected %d\n",
  1014. (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
  1015. (int)sizeof(*rhead));
  1016. ceph_msg_dump(msg);
  1017. }
  1018. static void reset_changed_osds(struct ceph_osd_client *osdc)
  1019. {
  1020. struct rb_node *p, *n;
  1021. for (p = rb_first(&osdc->osds); p; p = n) {
  1022. struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
  1023. n = rb_next(p);
  1024. if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
  1025. memcmp(&osd->o_con.peer_addr,
  1026. ceph_osd_addr(osdc->osdmap,
  1027. osd->o_osd),
  1028. sizeof(struct ceph_entity_addr)) != 0)
  1029. __reset_osd(osdc, osd);
  1030. }
  1031. }
  1032. /*
  1033. * Requeue requests whose mapping to an OSD has changed. If requests map to
  1034. * no osd, request a new map.
  1035. *
  1036. * Caller should hold map_sem for read.
  1037. */
  1038. static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
  1039. {
  1040. struct ceph_osd_request *req, *nreq;
  1041. struct rb_node *p;
  1042. int needmap = 0;
  1043. int err;
  1044. dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
  1045. mutex_lock(&osdc->request_mutex);
  1046. for (p = rb_first(&osdc->requests); p; ) {
  1047. req = rb_entry(p, struct ceph_osd_request, r_node);
  1048. p = rb_next(p);
  1049. /*
  1050. * For linger requests that have not yet been
  1051. * registered, move them to the linger list; they'll
  1052. * be sent to the osd in the loop below. Unregister
  1053. * the request before re-registering it as a linger
  1054. * request to ensure the __map_request() below
  1055. * will decide it needs to be sent.
  1056. */
  1057. if (req->r_linger && list_empty(&req->r_linger_item)) {
  1058. dout("%p tid %llu restart on osd%d\n",
  1059. req, req->r_tid,
  1060. req->r_osd ? req->r_osd->o_osd : -1);
  1061. __unregister_request(osdc, req);
  1062. __register_linger_request(osdc, req);
  1063. continue;
  1064. }
  1065. err = __map_request(osdc, req, force_resend);
  1066. if (err < 0)
  1067. continue; /* error */
  1068. if (req->r_osd == NULL) {
  1069. dout("%p tid %llu maps to no osd\n", req, req->r_tid);
  1070. needmap++; /* request a newer map */
  1071. } else if (err > 0) {
  1072. if (!req->r_linger) {
  1073. dout("%p tid %llu requeued on osd%d\n", req,
  1074. req->r_tid,
  1075. req->r_osd ? req->r_osd->o_osd : -1);
  1076. req->r_flags |= CEPH_OSD_FLAG_RETRY;
  1077. }
  1078. }
  1079. }
  1080. list_for_each_entry_safe(req, nreq, &osdc->req_linger,
  1081. r_linger_item) {
  1082. dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
  1083. err = __map_request(osdc, req, force_resend);
  1084. dout("__map_request returned %d\n", err);
  1085. if (err == 0)
  1086. continue; /* no change and no osd was specified */
  1087. if (err < 0)
  1088. continue; /* hrm! */
  1089. if (req->r_osd == NULL) {
  1090. dout("tid %llu maps to no valid osd\n", req->r_tid);
  1091. needmap++; /* request a newer map */
  1092. continue;
  1093. }
  1094. dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
  1095. req->r_osd ? req->r_osd->o_osd : -1);
  1096. __register_request(osdc, req);
  1097. __unregister_linger_request(osdc, req);
  1098. }
  1099. mutex_unlock(&osdc->request_mutex);
  1100. if (needmap) {
  1101. dout("%d requests for down osds, need new map\n", needmap);
  1102. ceph_monc_request_next_osdmap(&osdc->client->monc);
  1103. }
  1104. reset_changed_osds(osdc);
  1105. }
  1106. /*
  1107. * Process updated osd map.
  1108. *
  1109. * The message contains any number of incremental and full maps, normally
  1110. * indicating some sort of topology change in the cluster. Kick requests
  1111. * off to different OSDs as needed.
  1112. */
  1113. void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
  1114. {
  1115. void *p, *end, *next;
  1116. u32 nr_maps, maplen;
  1117. u32 epoch;
  1118. struct ceph_osdmap *newmap = NULL, *oldmap;
  1119. int err;
  1120. struct ceph_fsid fsid;
  1121. dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
  1122. p = msg->front.iov_base;
  1123. end = p + msg->front.iov_len;
  1124. /* verify fsid */
  1125. ceph_decode_need(&p, end, sizeof(fsid), bad);
  1126. ceph_decode_copy(&p, &fsid, sizeof(fsid));
  1127. if (ceph_check_fsid(osdc->client, &fsid) < 0)
  1128. return;
  1129. down_write(&osdc->map_sem);
  1130. /* incremental maps */
  1131. ceph_decode_32_safe(&p, end, nr_maps, bad);
  1132. dout(" %d inc maps\n", nr_maps);
  1133. while (nr_maps > 0) {
  1134. ceph_decode_need(&p, end, 2*sizeof(u32), bad);
  1135. epoch = ceph_decode_32(&p);
  1136. maplen = ceph_decode_32(&p);
  1137. ceph_decode_need(&p, end, maplen, bad);
  1138. next = p + maplen;
  1139. if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
  1140. dout("applying incremental map %u len %d\n",
  1141. epoch, maplen);
  1142. newmap = osdmap_apply_incremental(&p, next,
  1143. osdc->osdmap,
  1144. &osdc->client->msgr);
  1145. if (IS_ERR(newmap)) {
  1146. err = PTR_ERR(newmap);
  1147. goto bad;
  1148. }
  1149. BUG_ON(!newmap);
  1150. if (newmap != osdc->osdmap) {
  1151. ceph_osdmap_destroy(osdc->osdmap);
  1152. osdc->osdmap = newmap;
  1153. }
  1154. kick_requests(osdc, 0);
  1155. } else {
  1156. dout("ignoring incremental map %u len %d\n",
  1157. epoch, maplen);
  1158. }
  1159. p = next;
  1160. nr_maps--;
  1161. }
  1162. if (newmap)
  1163. goto done;
  1164. /* full maps */
  1165. ceph_decode_32_safe(&p, end, nr_maps, bad);
  1166. dout(" %d full maps\n", nr_maps);
  1167. while (nr_maps) {
  1168. ceph_decode_need(&p, end, 2*sizeof(u32), bad);
  1169. epoch = ceph_decode_32(&p);
  1170. maplen = ceph_decode_32(&p);
  1171. ceph_decode_need(&p, end, maplen, bad);
  1172. if (nr_maps > 1) {
  1173. dout("skipping non-latest full map %u len %d\n",
  1174. epoch, maplen);
  1175. } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
  1176. dout("skipping full map %u len %d, "
  1177. "older than our %u\n", epoch, maplen,
  1178. osdc->osdmap->epoch);
  1179. } else {
  1180. int skipped_map = 0;
  1181. dout("taking full map %u len %d\n", epoch, maplen);
  1182. newmap = osdmap_decode(&p, p+maplen);
  1183. if (IS_ERR(newmap)) {
  1184. err = PTR_ERR(newmap);
  1185. goto bad;
  1186. }
  1187. BUG_ON(!newmap);
  1188. oldmap = osdc->osdmap;
  1189. osdc->osdmap = newmap;
  1190. if (oldmap) {
  1191. if (oldmap->epoch + 1 < newmap->epoch)
  1192. skipped_map = 1;
  1193. ceph_osdmap_destroy(oldmap);
  1194. }
  1195. kick_requests(osdc, skipped_map);
  1196. }
  1197. p += maplen;
  1198. nr_maps--;
  1199. }
  1200. done:
  1201. downgrade_write(&osdc->map_sem);
  1202. ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
  1203. /*
  1204. * subscribe to subsequent osdmap updates if full to ensure
  1205. * we find out when we are no longer full and stop returning
  1206. * ENOSPC.
  1207. */
  1208. if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
  1209. ceph_monc_request_next_osdmap(&osdc->client->monc);
  1210. mutex_lock(&osdc->request_mutex);
  1211. __send_queued(osdc);
  1212. mutex_unlock(&osdc->request_mutex);
  1213. up_read(&osdc->map_sem);
  1214. wake_up_all(&osdc->client->auth_wq);
  1215. return;
  1216. bad:
  1217. pr_err("osdc handle_map corrupt msg\n");
  1218. ceph_msg_dump(msg);
  1219. up_write(&osdc->map_sem);
  1220. return;
  1221. }
  1222. /*
  1223. * watch/notify callback event infrastructure
  1224. *
  1225. * These callbacks are used both for watch and notify operations.
  1226. */
  1227. static void __release_event(struct kref *kref)
  1228. {
  1229. struct ceph_osd_event *event =
  1230. container_of(kref, struct ceph_osd_event, kref);
  1231. dout("__release_event %p\n", event);
  1232. kfree(event);
  1233. }
  1234. static void get_event(struct ceph_osd_event *event)
  1235. {
  1236. kref_get(&event->kref);
  1237. }
  1238. void ceph_osdc_put_event(struct ceph_osd_event *event)
  1239. {
  1240. kref_put(&event->kref, __release_event);
  1241. }
  1242. EXPORT_SYMBOL(ceph_osdc_put_event);
  1243. static void __insert_event(struct ceph_osd_client *osdc,
  1244. struct ceph_osd_event *new)
  1245. {
  1246. struct rb_node **p = &osdc->event_tree.rb_node;
  1247. struct rb_node *parent = NULL;
  1248. struct ceph_osd_event *event = NULL;
  1249. while (*p) {
  1250. parent = *p;
  1251. event = rb_entry(parent, struct ceph_osd_event, node);
  1252. if (new->cookie < event->cookie)
  1253. p = &(*p)->rb_left;
  1254. else if (new->cookie > event->cookie)
  1255. p = &(*p)->rb_right;
  1256. else
  1257. BUG();
  1258. }
  1259. rb_link_node(&new->node, parent, p);
  1260. rb_insert_color(&new->node, &osdc->event_tree);
  1261. }
  1262. static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
  1263. u64 cookie)
  1264. {
  1265. struct rb_node **p = &osdc->event_tree.rb_node;
  1266. struct rb_node *parent = NULL;
  1267. struct ceph_osd_event *event = NULL;
  1268. while (*p) {
  1269. parent = *p;
  1270. event = rb_entry(parent, struct ceph_osd_event, node);
  1271. if (cookie < event->cookie)
  1272. p = &(*p)->rb_left;
  1273. else if (cookie > event->cookie)
  1274. p = &(*p)->rb_right;
  1275. else
  1276. return event;
  1277. }
  1278. return NULL;
  1279. }
  1280. static void __remove_event(struct ceph_osd_event *event)
  1281. {
  1282. struct ceph_osd_client *osdc = event->osdc;
  1283. if (!RB_EMPTY_NODE(&event->node)) {
  1284. dout("__remove_event removed %p\n", event);
  1285. rb_erase(&event->node, &osdc->event_tree);
  1286. ceph_osdc_put_event(event);
  1287. } else {
  1288. dout("__remove_event didn't remove %p\n", event);
  1289. }
  1290. }
  1291. int ceph_osdc_create_event(struct ceph_osd_client *osdc,
  1292. void (*event_cb)(u64, u64, u8, void *),
  1293. void *data, struct ceph_osd_event **pevent)
  1294. {
  1295. struct ceph_osd_event *event;
  1296. event = kmalloc(sizeof(*event), GFP_NOIO);
  1297. if (!event)
  1298. return -ENOMEM;
  1299. dout("create_event %p\n", event);
  1300. event->cb = event_cb;
  1301. event->one_shot = 0;
  1302. event->data = data;
  1303. event->osdc = osdc;
  1304. INIT_LIST_HEAD(&event->osd_node);
  1305. RB_CLEAR_NODE(&event->node);
  1306. kref_init(&event->kref); /* one ref for us */
  1307. kref_get(&event->kref); /* one ref for the caller */
  1308. spin_lock(&osdc->event_lock);
  1309. event->cookie = ++osdc->event_count;
  1310. __insert_event(osdc, event);
  1311. spin_unlock(&osdc->event_lock);
  1312. *pevent = event;
  1313. return 0;
  1314. }
  1315. EXPORT_SYMBOL(ceph_osdc_create_event);
  1316. void ceph_osdc_cancel_event(struct ceph_osd_event *event)
  1317. {
  1318. struct ceph_osd_client *osdc = event->osdc;
  1319. dout("cancel_event %p\n", event);
  1320. spin_lock(&osdc->event_lock);
  1321. __remove_event(event);
  1322. spin_unlock(&osdc->event_lock);
  1323. ceph_osdc_put_event(event); /* caller's */
  1324. }
  1325. EXPORT_SYMBOL(ceph_osdc_cancel_event);
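/*
 * Illustrative-only: how the event machinery above is typically tied to
 * a watch op.  ceph_osdc_create_event() hands back an event whose cookie
 * the caller stores in the op's watch.cookie field, which is how
 * handle_watch_notify() below finds the event again when a notification
 * arrives.  The callback signature matches the call made from
 * do_event_work(); 'my_notify_cb', 'data' and 'op' are hypothetical.
 *
 *	static void my_notify_cb(u64 ver, u64 notify_id, u8 opcode,
 *				 void *data)
 *	{
 *		...
 *	}
 *
 *	ceph_osdc_create_event(osdc, my_notify_cb, data, &event);
 *	op.watch.cookie = event->cookie;
 *	...
 *	ceph_osdc_cancel_event(event);
 */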
  1326. static void do_event_work(struct work_struct *work)
  1327. {
  1328. struct ceph_osd_event_work *event_work =
  1329. container_of(work, struct ceph_osd_event_work, work);
  1330. struct ceph_osd_event *event = event_work->event;
  1331. u64 ver = event_work->ver;
  1332. u64 notify_id = event_work->notify_id;
  1333. u8 opcode = event_work->opcode;
  1334. dout("do_event_work completing %p\n", event);
  1335. event->cb(ver, notify_id, opcode, event->data);
  1336. dout("do_event_work completed %p\n", event);
  1337. ceph_osdc_put_event(event);
  1338. kfree(event_work);
  1339. }
  1340. /*
  1341. * Process osd watch notifications
  1342. */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;
#ifdef CONFIG_BLOCK
	req->r_request->bio = req->r_bio;
#endif
	req->r_request->trail = &req->r_trail;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __map_request(osdc, req, 0);
		if (rc < 0) {
			if (nofail) {
				dout("osdc_start_request failed map, "
				     " will retry %lld\n", req->r_tid);
				rc = 0;
			}
			goto out_unlock;
		}
		if (req->r_osd == NULL) {
			dout("send_request %p no up osds in pg\n", req);
			ceph_monc_request_next_osdmap(&osdc->client->monc);
		} else {
			__send_request(osdc, req);
		}
		rc = 0;
	}

out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

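/*
 * Illustrative sketch (not part of this file): the usual synchronous
 * calling pattern, as used by ceph_osdc_readpages()/ceph_osdc_writepages()
 * below.  "req" is assumed to have been built with ceph_osdc_new_request().
 *
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * Passing nofail = true makes a mapping failure non-fatal: the request
 * stays registered and is retried when a new osdmap arrives.
 */
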
/*
 * sync - wait for all in-flight requests to flush. avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

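/*
 * Illustrative note (not part of this file): a caller flushing dirty data,
 * e.g. a filesystem sync path, issues its writes and then calls
 *
 *	ceph_osdc_sync(osdc);
 *
 * which blocks until every write in flight at the time of the call has
 * been committed (r_safe_completion); reads and writes submitted after
 * the call are not waited on.
 */
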
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	INIT_LIST_HEAD(&osdc->req_unsent);
	INIT_LIST_HEAD(&osdc->req_notarget);
	INIT_LIST_HEAD(&osdc->req_linger);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	spin_lock_init(&osdc->event_lock);
	osdc->event_tree = RB_ROOT;
	osdc->event_count = 0;

	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				OSD_OP_FRONT_LEN, 10, true,
				"osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				OSD_OPREPLY_FRONT_LEN, 10, true,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	/* create_singlethread_workqueue() returns NULL on failure, not an
	 * ERR_PTR, so test for NULL rather than IS_ERR() */
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq) {
		err = -ENOMEM;
		goto out_msgpool;
	}
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

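/*
 * Illustrative sketch (not part of this file): the osd client is embedded
 * in struct ceph_client and is brought up and torn down roughly like this
 * by the client core; surrounding error handling is elided.
 *
 *	err = ceph_osdc_init(&client->osdc, client);
 *	if (err < 0)
 *		goto fail;
 *	...
 *	ceph_osdc_stop(&client->osdc);
 *
 * ceph_osdc_stop() assumes init succeeded: it unconditionally flushes and
 * destroys the notify workqueue and the pools created in ceph_osdc_init().
 */
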
/*
 * Read some contiguous pages. If we cross a stripe boundary, shorten
 * *plen. Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_pages = pages;

	dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
	     off, *plen, req->r_num_pages, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

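/*
 * Illustrative sketch (not part of this file): reading one extent of a
 * file into preallocated pages.  "vino", "layout", "pages", "off" and the
 * truncate parameters are placeholders supplied by the caller.
 *
 *	u64 len = (u64)num_pages << PAGE_SHIFT;
 *
 *	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *				 truncate_seq, truncate_size,
 *				 pages, num_pages, 0);
 *	if (rc < 0)
 *		return rc;
 *	// rc is the number of bytes read; len may have been shortened
 *	// to the object/stripe boundary.
 */
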
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, 0,
				    truncate_seq, truncate_size, mtime,
				    true, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	req->r_pages = pages;
	dout("writepages %llu~%llu (%d pages)\n", off, len,
	     req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

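/*
 * Illustrative sketch (not part of this file): synchronously writing "len"
 * bytes from "pages" at file offset "off".  snapc and mtime are supplied by
 * the caller; vino.snap must be CEPH_NOSNAP.
 *
 *	rc = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
 *				  truncate_seq, truncate_size, &mtime,
 *				  pages, num_pages);
 *	if (rc < 0)
 *		return rc;
 *	// on success rc is the length actually written, possibly shortened
 *	// to an object boundary
 */
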
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * lookup and return message for incoming reply. set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}

	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		int want = calc_pages_for(req->r_page_alignment, data_len);

		if (req->r_pages && unlikely(req->r_num_pages < want)) {
			pr_warning("tid %lld reply has %d bytes %d pages, we"
				   " had only %d pages ready\n", tid, data_len,
				   want, req->r_num_pages);
			*skip = 1;
			ceph_msg_put(m);
			m = NULL;
			goto out;
		}
		m->pages = req->r_pages;
		m->nr_pages = req->r_num_pages;
		m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
		m->bio = req->r_bio;
#endif
	}
	*skip = 0;
	req->r_con_filling_msg = con->ops->get(con);
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately. Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
		int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						     auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	/*
	 * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
	 * XXX which do we do: succeed or fail?
	 */
	return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	if (ac->ops && ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);

	return ceph_monc_validate_auth(&osdc->client->monc);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};