/* osd_client.c */

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static int __send_request(struct ceph_osd_client *osdc,
			  struct ceph_osd_request *req);

static int op_needs_trail(int op)
{
	switch (op) {
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY:
		return 1;
	default:
		return 0;
	}
}

static int op_has_extent(int op)
{
	return (op == CEPH_OSD_OP_READ ||
		op == CEPH_OSD_OP_WRITE);
}

void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
			  struct ceph_file_layout *layout,
			  u64 snapid,
			  u64 off, u64 *plen, u64 *bno,
			  struct ceph_osd_request *req,
			  struct ceph_osd_req_op *op)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	u64 orig_len = *plen;
	u64 objoff, objlen;	/* extent in object */

	reqhead->snapid = cpu_to_le64(snapid);

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, plen, bno,
				      &objoff, &objlen);
	if (*plen < orig_len)
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);

	if (op_has_extent(op->op)) {
		op->extent.offset = objoff;
		op->extent.length = objlen;
	}
	req->r_num_pages = calc_pages_for(off, *plen);
	req->r_page_alignment = off & ~PAGE_MASK;
	if (op->op == CEPH_OSD_OP_WRITE)
		op->payload_len = *plen;

	dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
	     *bno, objoff, objlen, req->r_num_pages);
}
EXPORT_SYMBOL(ceph_calc_raw_layout);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices." (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
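
/*
 * A minimal sketch of the request lifecycle from a caller's point of
 * view (illustrative only; error handling omitted, truncate_seq,
 * truncate_size and page_align are the caller's values, and
 * ceph_osdc_wait_request() is assumed to exist outside this excerpt):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false, 1, page_align);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ceph_osdc_wait_request(osdc, req);	(or use req->r_callback)
 *	ceph_osdc_put_request(req);
 */
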
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
			struct ceph_vino vino,
			struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			struct ceph_osd_request *req,
			struct ceph_osd_req_op *op)
{
	u64 bno;

	ceph_calc_raw_layout(osdc, layout, vino.snap, off,
			     plen, &bno, req, op);

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);
}
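
/*
 * Example: with a simple layout of 4 MB objects and no striping (an
 * assumption; the actual geometry comes from *layout), a 1 MB read at
 * file offset 0x380000 (3.5 MB) crosses the first object boundary, so
 * *plen is shortened to 0x80000 (0.5 MB) and the op targets object
 * "<ino-hex>.00000000" at objoff 0x380000.  The caller reissues for
 * the remainder, which maps to "<ino-hex>.00000001".
 */
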
/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_con_filling_msg) {
		dout("release_request revoking pages %p from con %p\n",
		     req->r_pages, req->r_con_filling_msg);
		ceph_con_revoke_message(req->r_con_filling_msg,
					req->r_reply);
		ceph_con_put(req->r_con_filling_msg);
	}
	if (req->r_own_pages)
		ceph_release_page_vector(req->r_pages,
					 req->r_num_pages);
#ifdef CONFIG_BLOCK
	if (req->r_bio)
		bio_put(req->r_bio);
#endif
	ceph_put_snap_context(req->r_snapc);
	if (req->r_trail) {
		ceph_pagelist_release(req->r_trail);
		kfree(req->r_trail);
	}
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);

static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
{
	int i = 0;

	if (needs_trail)
		*needs_trail = 0;
	while (ops[i].op) {
		if (needs_trail && op_needs_trail(ops[i].op))
			*needs_trail = 1;
		i++;
	}

	return i;
}
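
/*
 * Note: get_num_ops() relies on the ops array being terminated by an
 * entry whose op field is 0, as in this sketch (see also
 * ceph_osdc_new_request() below):
 *
 *	struct ceph_osd_req_op ops[2];
 *
 *	ops[0].op = CEPH_OSD_OP_STARTSYNC;
 *	ops[0].payload_len = 0;
 *	ops[1].op = 0;		(terminator)
 */
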
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       int flags,
					       struct ceph_snap_context *snapc,
					       struct ceph_osd_req_op *ops,
					       bool use_mempool,
					       gfp_t gfp_flags,
					       struct page **pages,
					       struct bio *bio)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	int needs_trail;
	int num_op = get_num_ops(ops, &needs_trail);
	size_t msg_size = sizeof(struct ceph_osd_request_head);

	msg_size += num_op*sizeof(struct ceph_osd_op);

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	req->r_flags = flags;

	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	/* allocate space for the trailing data */
	if (needs_trail) {
		req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
		if (!req->r_trail) {
			ceph_osdc_put_request(req);
			return NULL;
		}
		ceph_pagelist_init(req->r_trail);
	}

	/* create request message; allow space for oid */
	msg_size += 40;
	if (snapc)
		msg_size += sizeof(u64) * snapc->num_snaps;
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;
	req->r_pages = pages;
#ifdef CONFIG_BLOCK
	if (bio) {
		req->r_bio = bio;
		bio_get(req->r_bio);
	}
#endif

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static void osd_req_encode_op(struct ceph_osd_request *req,
			      struct ceph_osd_op *dst,
			      struct ceph_osd_req_op *src)
{
	dst->op = cpu_to_le16(src->op);

	switch (dst->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		dst->extent.offset =
			cpu_to_le64(src->extent.offset);
		dst->extent.length =
			cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;

	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		BUG_ON(!req->r_trail);

		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		ceph_pagelist_append(req->r_trail, src->xattr.name,
				     src->xattr.name_len);
		ceph_pagelist_append(req->r_trail, src->xattr.val,
				     src->xattr.value_len);
		break;

	case CEPH_OSD_OP_CALL:
		BUG_ON(!req->r_trail);

		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);

		ceph_pagelist_append(req->r_trail, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(req->r_trail, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(req->r_trail, src->cls.indata,
				     src->cls.indata_len);
		break;

	case CEPH_OSD_OP_ROLLBACK:
		dst->snap.snapid = cpu_to_le64(src->snap.snapid);
		break;

	case CEPH_OSD_OP_STARTSYNC:
		break;

	case CEPH_OSD_OP_NOTIFY:
		{
			__le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
			__le32 timeout = cpu_to_le32(src->watch.timeout);

			BUG_ON(!req->r_trail);

			ceph_pagelist_append(req->r_trail,
					     &prot_ver, sizeof(prot_ver));
			ceph_pagelist_append(req->r_trail,
					     &timeout, sizeof(timeout));
		}
		/* fall through: NOTIFY also encodes the watch fields */
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unrecognized osd opcode %d\n", dst->op);
		WARN_ON(1);
		break;
	}
	dst->payload_len = cpu_to_le32(src->payload_len);
}
/*
 * build new request AND message
 *
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, u64 *plen,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc,
			     struct timespec *mtime,
			     const char *oid,
			     int oid_len)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_request_head *head;
	struct ceph_osd_req_op *src_op;
	struct ceph_osd_op *op;
	void *p;
	int num_op = get_num_ops(src_ops, NULL);
	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
	int flags = req->r_flags;
	u64 data_len = 0;
	int i;

	head = msg->front.iov_base;
	op = (void *)(head + 1);
	p = (void *)(op + num_op);

	req->r_snapc = ceph_get_snap_context(snapc);

	head->client_inc = cpu_to_le32(1); /* always, for now. */
	head->flags = cpu_to_le32(flags);
	if (flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(&head->mtime, mtime);
	head->num_ops = cpu_to_le16(num_op);

	/* fill in oid */
	head->object_len = cpu_to_le32(oid_len);
	memcpy(p, oid, oid_len);
	p += oid_len;

	src_op = src_ops;
	while (src_op->op) {
		osd_req_encode_op(req, op, src_op);
		src_op++;
		op++;
	}

	if (req->r_trail)
		data_len += req->r_trail->length;

	if (snapc) {
		head->snap_seq = cpu_to_le64(snapc->seq);
		head->num_snaps = cpu_to_le32(snapc->num_snaps);
		for (i = 0; i < snapc->num_snaps; i++) {
			put_unaligned_le64(snapc->snaps[i], p);
			p += sizeof(u64);
		}
	}

	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
	} else if (data_len) {
		req->r_request->hdr.data_off = 0;
		req->r_request->hdr.data_len = cpu_to_le32(data_len);
	}

	req->r_request->page_alignment = req->r_page_alignment;

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);
	return;
}
EXPORT_SYMBOL(ceph_osdc_build_request);

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool, int num_reply,
					       int page_align)
{
	struct ceph_osd_req_op ops[3];
	struct ceph_osd_request *req;

	ops[0].op = opcode;
	ops[0].extent.truncate_seq = truncate_seq;
	ops[0].extent.truncate_size = truncate_size;
	ops[0].payload_len = 0;

	if (do_sync) {
		ops[1].op = CEPH_OSD_OP_STARTSYNC;
		ops[1].payload_len = 0;
		ops[2].op = 0;
	} else
		ops[1].op = 0;

	req = ceph_osdc_alloc_request(osdc, flags,
				      snapc, ops,
				      use_mempool,
				      GFP_NOFS, NULL, NULL);
	if (!req)
		return NULL;

	/* calculate max write size */
	calc_layout(osdc, vino, layout, off, plen, req, ops);
	req->r_file_layout = *layout; /* keep a copy */

	/* in case it differs from natural (file) alignment that
	   calc_layout filled in for us */
	req->r_num_pages = calc_pages_for(page_align, *plen);
	req->r_page_alignment = page_align;

	ceph_osdc_build_request(req, off, plen, ops,
				snapc,
				mtime,
				req->r_oid, req->r_oid_len);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err == -EAGAIN)
		return;

	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
		dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}

	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add(&req->r_req_lru_item, &osdc->req_unsent);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

static void kick_osd_requests(struct ceph_osd_client *osdc,
			      struct ceph_osd *kickosd)
{
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, kickosd);
	mutex_unlock(&osdc->request_mutex);
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	kick_osd_requests(osdc, osd);
	send_queued(osdc);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(osdc->client->msgr, &osd->o_con);
	osd->o_con.private = osd;
	osd->o_con.ops = &osd_con_ops;
	osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref)) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		if (osd->o_authorizer)
			ac->ops->destroy_authorizer(ac, osd->o_authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (!remove_all && time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_osd_request *req;
	int ret = 0;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);
	} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
			  &osd->o_con.peer_addr,
			  sizeof(osd->o_con.peer_addr)) == 0 &&
		   !ceph_con_opened(&osd->o_con)) {
		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;
		ret = -EAGAIN;
	} else {
		ceph_con_close(&osd->o_con);
		ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
		osd->o_incarnation++;
	}
	return ret;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	INIT_LIST_HEAD(&req->r_req_lru_item);

	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;

	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	mutex_unlock(&osdc->request_mutex);
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	ceph_osdc_put_request(req);

	list_del_init(&req->r_req_lru_item);
	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
		req->r_sent = 0;
	}
}

static void __register_linger_request(struct ceph_osd_client *osdc,
				      struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	if (req->r_osd) {
		list_del_init(&req->r_linger_item);
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
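/*
 * A minimal linger sketch (illustrative, not from this file): a caller
 * that wants a watch to persist across OSD restarts marks the request
 * before starting it, and later drops its reference explicitly:
 *
 *	ceph_osdc_set_request_linger(osdc, req);
 *	ceph_osdc_start_request(osdc, req, false);
 *	...
 *	ceph_osdc_unregister_linger_request(osdc, req);
 */
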
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	pgid = reqhead->layout.ol_pgid;
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
	     req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		req->r_osd->o_osd = o;
		req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;  /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static int __send_request(struct ceph_osd_client *osdc,
			  struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead;

	dout("send_request %p tid %llu to osd%d flags %d\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags);

	reqhead = req->r_request->front.iov_base;
	reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
	reqhead->reassert_version = req->r_reassert_version;

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
	return 0;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("send_queued\n");
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
		__send_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req, *last_req = NULL;
	struct ceph_osd *osd;
	unsigned long timeout = osdc->client->options->osd_timeout * HZ;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	unsigned long last_stamp = 0;
	struct list_head slow_osds;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * reset osds that appear to be _really_ unresponsive.  this
	 * is a failsafe measure.. we really shouldn't be getting to
	 * this point if the system is working properly.  the monitors
	 * should mark the osd as failed and we should find out about
	 * it from an updated osd map.
	 */
	while (timeout && !list_empty(&osdc->req_lru)) {
		req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
				 r_req_lru_item);

		if (time_before(jiffies, req->r_stamp + timeout))
			break;

		BUG_ON(req == last_req && req->r_stamp == last_stamp);
		last_req = req;
		last_stamp = req->r_stamp;

		osd = req->r_osd;
		BUG_ON(!osd);
		pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
			   req->r_tid, osd->o_osd);
		__kick_osd_requests(osdc, osd);
	}

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	mutex_unlock(&osdc->request_mutex);

	send_queued(osdc);

	up_read(&osdc->map_sem);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc, 0);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;
	s32 result;

	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	result = le32_to_cpu(rhead->result);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;

	dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		ceph_con_put(con);
	}

	if (!req->r_got_reply) {
		unsigned bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
	ceph_msg_dump(msg);
}

static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static void kick_requests(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests\n");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		err = __map_request(osdc, req);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			if (!req->r_linger)
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__unregister_linger_request(osdc, req);
		__register_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc);
			reset_changed_osds(osdc);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap)
				ceph_osdmap_destroy(oldmap);
			kick_requests(osdc);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	send_queued(osdc);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}
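
/*
 * For reference, the payload that ceph_osdc_handle_map() decodes above
 * looks like this (sizes inferred from the decode calls; a sketch, not
 * a normative wire spec):
 *
 *	struct ceph_fsid fsid;
 *	u32 nr_inc_maps;
 *	{ u32 epoch; u32 maplen; u8 data[maplen]; }  (nr_inc_maps times)
 *	u32 nr_full_maps;
 *	{ u32 epoch; u32 maplen; u8 data[maplen]; }  (nr_full_maps times)
 */
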
  1269. /*
  1270. * watch/notify callback event infrastructure
  1271. *
  1272. * These callbacks are used both for watch and notify operations.
  1273. */
  1274. static void __release_event(struct kref *kref)
  1275. {
  1276. struct ceph_osd_event *event =
  1277. container_of(kref, struct ceph_osd_event, kref);
  1278. dout("__release_event %p\n", event);
  1279. kfree(event);
  1280. }
  1281. static void get_event(struct ceph_osd_event *event)
  1282. {
  1283. kref_get(&event->kref);
  1284. }
  1285. void ceph_osdc_put_event(struct ceph_osd_event *event)
  1286. {
  1287. kref_put(&event->kref, __release_event);
  1288. }
  1289. EXPORT_SYMBOL(ceph_osdc_put_event);
  1290. static void __insert_event(struct ceph_osd_client *osdc,
  1291. struct ceph_osd_event *new)
  1292. {
  1293. struct rb_node **p = &osdc->event_tree.rb_node;
  1294. struct rb_node *parent = NULL;
  1295. struct ceph_osd_event *event = NULL;
  1296. while (*p) {
  1297. parent = *p;
  1298. event = rb_entry(parent, struct ceph_osd_event, node);
  1299. if (new->cookie < event->cookie)
  1300. p = &(*p)->rb_left;
  1301. else if (new->cookie > event->cookie)
  1302. p = &(*p)->rb_right;
  1303. else
  1304. BUG();
  1305. }
  1306. rb_link_node(&new->node, parent, p);
  1307. rb_insert_color(&new->node, &osdc->event_tree);
  1308. }
  1309. static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
  1310. u64 cookie)
  1311. {
  1312. struct rb_node **p = &osdc->event_tree.rb_node;
  1313. struct rb_node *parent = NULL;
  1314. struct ceph_osd_event *event = NULL;
  1315. while (*p) {
  1316. parent = *p;
  1317. event = rb_entry(parent, struct ceph_osd_event, node);
  1318. if (cookie < event->cookie)
  1319. p = &(*p)->rb_left;
  1320. else if (cookie > event->cookie)
  1321. p = &(*p)->rb_right;
  1322. else
  1323. return event;
  1324. }
  1325. return NULL;
  1326. }
  1327. static void __remove_event(struct ceph_osd_event *event)
  1328. {
  1329. struct ceph_osd_client *osdc = event->osdc;
  1330. if (!RB_EMPTY_NODE(&event->node)) {
  1331. dout("__remove_event removed %p\n", event);
  1332. rb_erase(&event->node, &osdc->event_tree);
  1333. ceph_osdc_put_event(event);
  1334. } else {
  1335. dout("__remove_event didn't remove %p\n", event);
  1336. }
  1337. }
  1338. int ceph_osdc_create_event(struct ceph_osd_client *osdc,
  1339. void (*event_cb)(u64, u64, u8, void *),
  1340. int one_shot, void *data,
  1341. struct ceph_osd_event **pevent)
  1342. {
  1343. struct ceph_osd_event *event;
  1344. event = kmalloc(sizeof(*event), GFP_NOIO);
  1345. if (!event)
  1346. return -ENOMEM;
  1347. dout("create_event %p\n", event);
  1348. event->cb = event_cb;
  1349. event->one_shot = one_shot;
  1350. event->data = data;
  1351. event->osdc = osdc;
  1352. INIT_LIST_HEAD(&event->osd_node);
  1353. kref_init(&event->kref); /* one ref for us */
  1354. kref_get(&event->kref); /* one ref for the caller */
  1355. init_completion(&event->completion);
  1356. spin_lock(&osdc->event_lock);
  1357. event->cookie = ++osdc->event_count;
  1358. __insert_event(osdc, event);
  1359. spin_unlock(&osdc->event_lock);
  1360. *pevent = event;
  1361. return 0;
  1362. }
  1363. EXPORT_SYMBOL(ceph_osdc_create_event);
  1364. void ceph_osdc_cancel_event(struct ceph_osd_event *event)
  1365. {
  1366. struct ceph_osd_client *osdc = event->osdc;
  1367. dout("cancel_event %p\n", event);
  1368. spin_lock(&osdc->event_lock);
  1369. __remove_event(event);
  1370. spin_unlock(&osdc->event_lock);
  1371. ceph_osdc_put_event(event); /* caller's */
  1372. }
  1373. EXPORT_SYMBOL(ceph_osdc_cancel_event);
static void do_event_work(struct work_struct *work)
{
        struct ceph_osd_event_work *event_work =
                container_of(work, struct ceph_osd_event_work, work);
        struct ceph_osd_event *event = event_work->event;
        u64 ver = event_work->ver;
        u64 notify_id = event_work->notify_id;
        u8 opcode = event_work->opcode;

        dout("do_event_work completing %p\n", event);
        event->cb(ver, notify_id, opcode, event->data);
        complete(&event->completion);
        dout("do_event_work completed %p\n", event);
        ceph_osdc_put_event(event);
        kfree(event_work);
}
/*
 * Process osd watch notifications
 */
void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        void *p, *end;
        u8 proto_ver;
        u64 cookie, ver, notify_id;
        u8 opcode;
        struct ceph_osd_event *event;
        struct ceph_osd_event_work *event_work;

        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        ceph_decode_8_safe(&p, end, proto_ver, bad);
        ceph_decode_8_safe(&p, end, opcode, bad);
        ceph_decode_64_safe(&p, end, cookie, bad);
        ceph_decode_64_safe(&p, end, ver, bad);
        ceph_decode_64_safe(&p, end, notify_id, bad);

        spin_lock(&osdc->event_lock);
        event = __find_event(osdc, cookie);
        if (event) {
                get_event(event);
                if (event->one_shot)
                        __remove_event(event);
        }
        spin_unlock(&osdc->event_lock);
        dout("handle_watch_notify cookie %lld ver %lld event %p\n",
             cookie, ver, event);
        if (event) {
                event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
                if (!event_work) {
                        dout("ERROR: could not allocate event_work\n");
                        goto done_err;
                }
                INIT_WORK(&event_work->work, do_event_work);
                event_work->event = event;
                event_work->ver = ver;
                event_work->notify_id = notify_id;
                event_work->opcode = opcode;
                if (!queue_work(osdc->notify_wq, &event_work->work)) {
                        dout("WARNING: failed to queue notify event work\n");
                        kfree(event_work); /* not queued; free it here */
                        goto done_err;
                }
        }

        return;

done_err:
        complete(&event->completion);
        ceph_osdc_put_event(event);
        return;

bad:
        pr_err("osdc handle_watch_notify corrupt msg\n");
        return;
}
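
/*
 * Wait up to @timeout seconds for the event callback to fire.
 * Returns 0 on completion (or timeout), or -ERESTARTSYS if
 * interrupted by a signal.  The caller's reference is dropped either
 * way.
 */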
int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
{
        int err;

        dout("wait_event %p\n", event);
        err = wait_for_completion_interruptible_timeout(&event->completion,
                                                        timeout * HZ);
        ceph_osdc_put_event(event);
        if (err > 0)
                err = 0;
        dout("wait_event %p returns %d\n", event, err);
        return err;
}
EXPORT_SYMBOL(ceph_osdc_wait_event);
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                            struct ceph_osd_request *req,
                            bool nofail)
{
        int rc = 0;

        req->r_request->pages = req->r_pages;
        req->r_request->nr_pages = req->r_num_pages;
#ifdef CONFIG_BLOCK
        req->r_request->bio = req->r_bio;
#endif
        req->r_request->trail = req->r_trail;

        register_request(osdc, req);

        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        /*
         * a racing kick_requests() may have sent the message for us
         * while we dropped request_mutex above, so only send now if
         * the request still hasn't been touched yet.
         */
        if (req->r_sent == 0) {
                rc = __map_request(osdc, req);
                if (rc < 0) {
                        if (nofail) {
                                dout("osdc_start_request failed map, "
                                     "will retry %lld\n", req->r_tid);
                                rc = 0;
                        }
                        goto out_unlock;
                }
                if (req->r_osd == NULL) {
                        dout("send_request %p no up osds in pg\n", req);
                        ceph_monc_request_next_osdmap(&osdc->client->monc);
                } else {
                        rc = __send_request(osdc, req);
                        if (rc) {
                                if (nofail) {
                                        dout("osdc_start_request failed send, "
                                             "will retry %lld\n", req->r_tid);
                                        rc = 0;
                                } else {
                                        __unregister_request(osdc, req);
                                }
                        }
                }
        }

out_unlock:
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        int rc;

        rc = wait_for_completion_interruptible(&req->r_completion);
        if (rc < 0) {
                mutex_lock(&osdc->request_mutex);
                __cancel_request(req);
                __unregister_request(osdc, req);
                mutex_unlock(&osdc->request_mutex);
                complete_request(req);
                dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
                return rc;
        }

        dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
        return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req;
        u64 last_tid, next_tid = 0;

        mutex_lock(&osdc->request_mutex);
        last_tid = osdc->last_tid;
        while (1) {
                req = __lookup_request_ge(osdc, next_tid);
                if (!req)
                        break;
                if (req->r_tid > last_tid)
                        break;

                next_tid = req->r_tid + 1;
                if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
                        continue;

                ceph_osdc_get_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("sync waiting on tid %llu (last is %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                mutex_lock(&osdc->request_mutex);
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
        dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
        int err;

        dout("init\n");
        osdc->client = client;
        osdc->osdmap = NULL;
        init_rwsem(&osdc->map_sem);
        init_completion(&osdc->map_waiters);
        osdc->last_requested_map = 0;
        mutex_init(&osdc->request_mutex);
        osdc->last_tid = 0;
        osdc->osds = RB_ROOT;
        INIT_LIST_HEAD(&osdc->osd_lru);
        osdc->requests = RB_ROOT;
        INIT_LIST_HEAD(&osdc->req_lru);
        INIT_LIST_HEAD(&osdc->req_unsent);
        INIT_LIST_HEAD(&osdc->req_notarget);
        INIT_LIST_HEAD(&osdc->req_linger);
        osdc->num_requests = 0;
        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
        INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
        spin_lock_init(&osdc->event_lock);
        osdc->event_tree = RB_ROOT;
        osdc->event_count = 0;

        schedule_delayed_work(&osdc->osds_timeout_work,
            round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

        err = -ENOMEM;
        osdc->req_mempool = mempool_create_kmalloc_pool(10,
                                        sizeof(struct ceph_osd_request));
        if (!osdc->req_mempool)
                goto out;

        err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
                                "osd_op");
        if (err < 0)
                goto out_mempool;
        err = ceph_msgpool_init(&osdc->msgpool_op_reply,
                                OSD_OPREPLY_FRONT_LEN, 10, true,
                                "osd_op_reply");
        if (err < 0)
                goto out_msgpool;

        /* create_singlethread_workqueue() returns NULL on failure, not
         * an ERR_PTR, so an IS_ERR() check here would never fire */
        osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
        if (!osdc->notify_wq) {
                err = -ENOMEM;
                goto out_msgpool;
        }
        return 0;

out_msgpool:
        ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
        mempool_destroy(osdc->req_mempool);
out:
        return err;
}
EXPORT_SYMBOL(ceph_osdc_init);
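
/*
 * Tear down the client: flush pending notifications, stop the timeout
 * work, and release the osdmap, cached osds, and message pools.
 */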
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        flush_workqueue(osdc->notify_wq);
        destroy_workqueue(osdc->notify_wq);
        cancel_delayed_work_sync(&osdc->timeout_work);
        cancel_delayed_work_sync(&osdc->osds_timeout_work);
        if (osdc->osdmap) {
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
        remove_old_osds(osdc, 1);
        WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
EXPORT_SYMBOL(ceph_osdc_stop);
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        u32 truncate_seq, u64 truncate_size,
                        struct page **pages, int num_pages, int page_align)
{
        struct ceph_osd_request *req;
        int rc = 0;

        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
             vino.snap, off, *plen);
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, truncate_seq, truncate_size, NULL,
                                    false, 1, page_align);
        if (!req)
                return -ENOMEM;

        /* it may be a short read due to an object boundary */
        req->r_pages = pages;

        dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
             off, *plen, req->r_num_pages, page_align);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        dout("readpages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         struct ceph_file_layout *layout,
                         struct ceph_snap_context *snapc,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec *mtime,
                         struct page **pages, int num_pages,
                         int flags, int do_sync, bool nofail)
{
        struct ceph_osd_request *req;
        int rc = 0;
        int page_align = off & ~PAGE_MASK;

        BUG_ON(vino.snap != CEPH_NOSNAP);
        req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                    CEPH_OSD_OP_WRITE,
                                    flags | CEPH_OSD_FLAG_ONDISK |
                                            CEPH_OSD_FLAG_WRITE,
                                    snapc, do_sync,
                                    truncate_seq, truncate_size, mtime,
                                    nofail, 1, page_align);
        if (!req)
                return -ENOMEM;

        /* it may be a short write due to an object boundary */
        req->r_pages = pages;
        dout("writepages %llu~%llu (%d pages)\n", off, len,
             req->r_num_pages);

        rc = ceph_osdc_start_request(osdc, req, nofail);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        if (rc == 0)
                rc = len;
        dout("writepages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;
        int type = le16_to_cpu(msg->hdr.type);

        if (!osd)
                goto out;
        osdc = osd->o_osdc;

        switch (type) {
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(osdc, msg);
                break;
        case CEPH_MSG_OSD_OPREPLY:
                handle_reply(osdc, msg, con);
                break;
        case CEPH_MSG_WATCH_NOTIFY:
                handle_watch_notify(osdc, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
out:
        ceph_msg_put(msg);
}
/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct ceph_msg *m;
        struct ceph_osd_request *req;
        int front = le32_to_cpu(hdr->front_len);
        int data_len = le32_to_cpu(hdr->data_len);
        u64 tid;

        tid = le64_to_cpu(hdr->tid);
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (!req) {
                *skip = 1;
                m = NULL;
                pr_info("get_reply unknown tid %llu from osd%d\n", tid,
                        osd->o_osd);
                goto out;
        }

        if (req->r_con_filling_msg) {
                dout("get_reply revoking msg %p from old con %p\n",
                     req->r_reply, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
                ceph_con_put(req->r_con_filling_msg);
                req->r_con_filling_msg = NULL;
        }

        if (front > req->r_reply->front.iov_len) {
                pr_warning("get_reply front %d > preallocated %d\n",
                           front, (int)req->r_reply->front.iov_len);
                m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS);
                if (!m)
                        goto out;
                ceph_msg_put(req->r_reply);
                req->r_reply = m;
        }
        m = ceph_msg_get(req->r_reply);

        if (data_len > 0) {
                int want = calc_pages_for(req->r_page_alignment, data_len);

                if (unlikely(req->r_num_pages < want)) {
                        pr_warning("tid %lld reply has %d bytes %d pages, we"
                                   " had only %d pages ready\n", tid, data_len,
                                   want, req->r_num_pages);
                        *skip = 1;
                        ceph_msg_put(m);
                        m = NULL;
                        goto out;
                }
                m->pages = req->r_pages;
                m->nr_pages = req->r_num_pages;
                m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
                m->bio = req->r_bio;
#endif
        }
        *skip = 0;
        req->r_con_filling_msg = ceph_con_get(con);
        dout("get_reply tid %lld %p\n", tid, m);

out:
        mutex_unlock(&osdc->request_mutex);
        return m;
}
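
/*
 * Allocate a message for an incoming frame based on its type: map and
 * watch-notify frames get a fresh message, while op replies are
 * matched against the pending request's preallocated reply via
 * get_reply().
 */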
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);
        int front = le32_to_cpu(hdr->front_len);

        switch (type) {
        case CEPH_MSG_OSD_MAP:
        case CEPH_MSG_WATCH_NOTIFY:
                return ceph_msg_new(type, front, GFP_NOFS);
        case CEPH_MSG_OSD_OPREPLY:
                return get_reply(con, hdr, skip);
        default:
                pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
                        osd->o_osd);
                *skip = 1;
                return NULL;
        }
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
                return con;
        return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        put_osd(osd);
}

/*
 * authentication
 */
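/*
 * Hand the messenger an authorizer for this osd connection, creating
 * one through the auth client on first use or when a fresh one is
 * forced (e.g. after the previous authorizer was rejected).
 */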
static int get_authorizer(struct ceph_connection *con,
                          void **buf, int *len, int *proto,
                          void **reply_buf, int *reply_len, int force_new)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
        int ret = 0;

        if (force_new && o->o_authorizer) {
                ac->ops->destroy_authorizer(ac, o->o_authorizer);
                o->o_authorizer = NULL;
        }
        if (o->o_authorizer == NULL) {
                ret = ac->ops->create_authorizer(
                        ac, CEPH_ENTITY_TYPE_OSD,
                        &o->o_authorizer,
                        &o->o_authorizer_buf,
                        &o->o_authorizer_buf_len,
                        &o->o_authorizer_reply_buf,
                        &o->o_authorizer_reply_buf_len);
                if (ret)
                        return ret;
        }

        *proto = ac->protocol;
        *buf = o->o_authorizer_buf;
        *len = o->o_authorizer_buf_len;
        *reply_buf = o->o_authorizer_reply_buf;
        *reply_len = o->o_authorizer_reply_buf_len;
        return 0;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        if (ac->ops->invalidate_authorizer)
                ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);

        return ceph_monc_validate_auth(&osdc->client->monc);
}
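
/*
 * Connection callbacks for osd sessions; the messenger calls into
 * these for refcounting, message dispatch and allocation,
 * authentication, and fault handling.
 */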
static const struct ceph_connection_operations osd_con_ops = {
        .get = get_osd_con,
        .put = put_osd_con,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .verify_authorizer_reply = verify_authorizer_reply,
        .invalidate_authorizer = invalidate_authorizer,
        .alloc_msg = alloc_msg,
        .fault = osd_reset,
};