/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
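
/*
 * Report whether the Topspin/Cisco workarounds apply to this target,
 * i.e. the workarounds are enabled and the target's IOC GUID begins
 * with a Topspin or Cisco OUI.
 */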
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
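
/*
 * Allocate an information unit (IU): the srp_iu structure itself, a
 * zeroed data buffer of @size bytes and a DMA mapping of that buffer
 * in @direction.  Returns NULL on any failure.
 */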
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
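
/*
 * Move a freshly created queue pair to the INIT state: look up the
 * P_Key index for the target's P_Key on the local port and set the
 * access flags needed for RDMA reads and writes from the target.
 */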
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
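
/*
 * Create the IB resources for a target port: one completion queue for
 * receives, one for sends, and an RC queue pair bound to both, then
 * transition the QP to the INIT state.
 */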
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target,
				       SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target,
				       SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->send_cq;
	init_attr->recv_cq             = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
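
/*
 * Query the subnet administrator for a path record to the target and
 * wait for srp_path_rec_completion() to fill in target->path.  Returns
 * zero on success or the failed query status otherwise.
 */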
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
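
/*
 * Build and send the CM REQ that carries the SRP_LOGIN_REQ.  The CM
 * private data holds the login request; the initiator and target port
 * identifiers are laid out according to the I/O class the target
 * reports (see the comments in the function body).
 */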
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &target->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = target->qp->qp_num;
	req->param.qp_type	      = target->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static bool srp_change_state(struct srp_target_port *target,
			     enum srp_target_state old,
			     enum srp_target_state new)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state == old) {
		target->state = new;
		changed = true;
	}
	spin_unlock_irq(&target->lock);
	return changed;
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}
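
/*
 * Resolve a path to the target and keep (re)sending the SRP login
 * until it either succeeds or fails with something other than a
 * redirect or a stale-connection REJ.  A stale connection is retried
 * at most three times with a fresh CM ID.
 */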
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = DID_RESET << 16;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct ib_wc wc;
	int i, ret;

	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
		return -EAGAIN;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
		ret = -EAGAIN;

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 *
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		queue_work(ib_wq, &target->work);
	}
	spin_unlock_irq(&target->lock);

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg    = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr  = dma_addr;
}
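
/*
 * Map one scatterlist entry, either by appending its pages to the FMR
 * being built up in @state or, when FMR cannot be used, by emitting a
 * direct descriptor that references the entry through the global rkey.
 */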
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
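
/*
 * Map the data buffer of a SCSI command for this request.  Returns the
 * length of the resulting SRP_CMD IU (command plus direct or indirect
 * descriptors) or a negative errno on failure.
 */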
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
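
/*
 * Post a single-SGE send (or, in srp_post_recv() below, receive) work
 * request for @iu on the target's queue pair.  The IU pointer itself
 * is used as the work request ID so the completion handlers can
 * recover it.
 */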
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
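
/*
 * Handle an SRP_RSP IU.  Task-management responses complete
 * target->tsk_mgmt_done; command responses copy status, sense data and
 * residual counts into the owning SCSI command and complete it.  The
 * request limit delta carried in the response is credited back.
 */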
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
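
/*
 * Dispatch one receive completion: sync the IU buffer for the CPU,
 * switch on the SRP opcode, and repost the receive buffer when done.
 */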
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed receive status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		srp_handle_recv(target, &wc);
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed send status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
		list_add(&iu->list, &target->free_tx);
	}
}
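
/*
 * SCSI mid-layer queuecommand entry point: grab a free request and TX
 * IU, build the SRP_CMD, map the data buffer and post the send.  The
 * error paths unwind and return SCSI_MLQUEUE_HOST_BUSY so the
 * mid-layer retries the command later.
 */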
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
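
/*
 * Handle a CM REP carrying an SRP_LOGIN_RSP: record the target's IU
 * limits, allocate the IU rings on first login, move the QP through
 * RTR to RTS, post the receive ring, and finally send the RTU.
 */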
  1174. static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
  1175. struct srp_login_rsp *lrsp,
  1176. struct srp_target_port *target)
  1177. {
  1178. struct ib_qp_attr *qp_attr = NULL;
  1179. int attr_mask = 0;
  1180. int ret;
  1181. int i;
  1182. if (lrsp->opcode == SRP_LOGIN_RSP) {
  1183. target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
  1184. target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
  1185. /*
  1186. * Reserve credits for task management so we don't
  1187. * bounce requests back to the SCSI mid-layer.
  1188. */
  1189. target->scsi_host->can_queue
  1190. = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
  1191. target->scsi_host->can_queue);
  1192. } else {
  1193. shost_printk(KERN_WARNING, target->scsi_host,
  1194. PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
  1195. ret = -ECONNRESET;
  1196. goto error;
  1197. }
  1198. if (!target->rx_ring[0]) {
  1199. ret = srp_alloc_iu_bufs(target);
  1200. if (ret)
  1201. goto error;
  1202. }
  1203. ret = -ENOMEM;
  1204. qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
  1205. if (!qp_attr)
  1206. goto error;
  1207. qp_attr->qp_state = IB_QPS_RTR;
  1208. ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
  1209. if (ret)
  1210. goto error_free;
  1211. ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
  1212. if (ret)
  1213. goto error_free;
  1214. for (i = 0; i < SRP_RQ_SIZE; i++) {
  1215. struct srp_iu *iu = target->rx_ring[i];
  1216. ret = srp_post_recv(target, iu);
  1217. if (ret)
  1218. goto error_free;
  1219. }
  1220. qp_attr->qp_state = IB_QPS_RTS;
  1221. ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
  1222. if (ret)
  1223. goto error_free;
  1224. ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
  1225. if (ret)
  1226. goto error_free;
  1227. ret = ib_send_cm_rtu(cm_id, NULL, 0);
  1228. error_free:
  1229. kfree(qp_attr);
  1230. error:
  1231. target->status = ret;
  1232. }
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
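
/*
 * Connection manager event dispatcher: routes REP and REJ events to the
 * handlers above, answers a DREQ with a DREP, and completes target->done
 * for the events that the connect path waits on.
 */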
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
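
/*
 * Build and post an SRP_TSK_MGMT IU (e.g. ABORT TASK or LUN RESET) and
 * wait up to SRP_ABORT_TIMEOUT_MS for the target's response.  Returns 0
 * on success and -1 if the target is gone, no TX IU is available, the
 * post fails, or the wait times out.
 */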
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func	= func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
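
/*
 * SCSI EH abort handler: claims the command's request slot, sends
 * ABORT TASK for it, then completes the command locally with DID_ABORT.
 */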
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
		return FAILED;
	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			  SRP_TSK_ABORT_TASK);
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return SUCCESS;
}
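
/*
 * SCSI EH device reset handler: issues a LUN RESET and, if the target
 * accepts it, resets every outstanding request queued for that device.
 */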
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}
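
/*
 * SCSI EH host reset handler: tears down and re-establishes the whole
 * SRP connection via srp_reconnect_target().
 */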
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
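/*
 * For example (all identifier values below are purely illustrative, and
 * "srp-mthca0-1" stands in for whatever name srp_add_port() gave the host):
 *
 *     echo id_ext=200100a0b8000000,ioc_guid=00a0b80200402bec,\
 *     dgid=fe800000000000000002c90200402bec,pkey=ffff,\
 *     service_id=200100a0b8000000 \
 *         > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */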
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_ERR,			NULL			}
};
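
/*
 * Parse the comma-separated option string written to add_target into
 * *target.  Each recognized token's bit is collected in opt_mask; parsing
 * succeeds only if every mandatory option in SRP_OPT_ALL was supplied,
 * otherwise the missing parameters are reported and -EINVAL is returned.
 */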
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
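
/*
 * add_target store method: allocates a SCSI host plus srp_target_port,
 * parses the option string, sets up the request ring and IB resources,
 * connects to the target, and finally registers it with the SCSI
 * midlayer.  Returns the full write count on success so sysfs sees one
 * complete write.
 */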
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
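
/*
 * Create the per-port srp_host: one "srp-<device>-<port>" entry under the
 * infiniband_srp class, carrying the add_target, ibdev and port
 * attributes.  Returns NULL on any failure.
 */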
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
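
/*
 * IB client "add" callback, invoked once per HCA: queries the device,
 * allocates the PD and DMA MR, tries to create an FMR pool (halving
 * max_pages_per_fmr until the HCA accepts it, falling back to no pool at
 * all), then registers an srp_host for each physical port.
 */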
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
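
/*
 * IB client "remove" callback: unregisters every port's sysfs entry,
 * marks all target ports removed, waits for in-flight reconnect work,
 * then tears down each target and the shared PD/MR/FMR-pool resources.
 */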
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(&target->lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(&target->lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_workqueue(ib_wq);

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_del_scsi_host_attr(target->scsi_host);
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			srp_free_req_data(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
};
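
/*
 * Module init: sanity-checks and clamps the sg-entry module parameters
 * (cmd_sg_entries is limited to 255 and indirect_sg_entries is raised to
 * at least cmd_sg_entries), then registers the SRP transport, the
 * infiniband_srp class, the SA client and the IB client, unwinding in
 * reverse order on failure.
 */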
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);