/* drivers/scsi/ibmvstgt.c */
  1. /*
  2. * IBM eServer i/pSeries Virtual SCSI Target Driver
  3. * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
  4. * Santiago Leon (santil@us.ibm.com) IBM Corp.
  5. * Linda Xie (lxie@us.ibm.com) IBM Corp.
  6. *
  7. * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  22. * USA
  23. */
  24. #include <linux/interrupt.h>
  25. #include <linux/module.h>
  26. #include <linux/slab.h>
  27. #include <scsi/scsi.h>
  28. #include <scsi/scsi_host.h>
  29. #include <scsi/scsi_transport_srp.h>
  30. #include <scsi/scsi_tgt.h>
  31. #include <scsi/libsrp.h>
  32. #include <asm/hvcall.h>
  33. #include <asm/iommu.h>
  34. #include <asm/prom.h>
  35. #include <asm/vio.h>
  36. #include "ibmvscsi.h"
  37. #define INITIAL_SRP_LIMIT 16
  38. #define DEFAULT_MAX_SECTORS 256
  39. #define TGT_NAME "ibmvstgt"
  40. /*
  41. * Hypervisor calls.
  42. */
  43. #define h_copy_rdma(l, sa, sb, da, db) \
  44. plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
  45. #define h_send_crq(ua, l, h) \
  46. plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
  47. #define h_reg_crq(ua, tok, sz)\
  48. plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
  49. #define h_free_crq(ua) \
  50. plpar_hcall_norets(H_FREE_CRQ, ua);
  51. /* tmp - will replace with SCSI logging stuff */
  52. #define eprintk(fmt, args...) \
  53. do { \
  54. printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
  55. } while (0)
  56. /* #define dprintk eprintk */
  57. #define dprintk(fmt, args...)
/*
 * Per-adapter state for one virtual SCSI target port; hung off the
 * srp_target via target->ldata.
 */
struct vio_port {
	struct vio_dev *dma_dev;	/* VIO device used for hcalls/DMA/IRQ */
	struct crq_queue crq_queue;	/* inbound CRQ ring shared with the hypervisor */
	struct work_struct crq_work;	/* bottom half that drains crq_queue */
	unsigned long liobn;		/* local I/O bus number (dma[0] of ibm,my-dma-window) */
	unsigned long riobn;		/* remote I/O bus number (dma[5] of ibm,my-dma-window) */
	struct srp_target *target;	/* back pointer to the owning target */
	struct srp_rport *rport;	/* SRP remote port, created at first login */
};
/* Workqueue running the CRQ bottom half (handle_crq) for all ports. */
static struct workqueue_struct *vtgtd;
static struct scsi_transport_template *ibmvstgt_transport_template;

/*
 * These are fixed for the system and come from the Open Firmware device tree.
 * We just store them here to save getting them every time.
 */
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
  76. static struct vio_port *target_to_port(struct srp_target *target)
  77. {
  78. return (struct vio_port *) target->ldata;
  79. }
  80. static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
  81. {
  82. return (union viosrp_iu *) (iue->sbuf->buf);
  83. }
/*
 * Push a built IU back to the client: RDMA-copy @length bytes of the IU
 * buffer into the client's memory (at the token it gave us with the
 * request), then post a CRQ entry announcing the response.
 *
 * Returns 0 on success, otherwise the failing hcall's return code.
 */
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);
	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	/* Build the CRQ notification entry by hand. */
	crq.cooked.valid = 0x80;
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	if (rc == 0)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);

	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}
#define SRP_RSP_SENSE_DATA_LEN	18

/*
 * Build an SRP_RSP in place over the request IU and send it back.
 * @sc may be NULL (task-management and error paths); in that case a
 * fixed-format sense buffer is synthesized from @status / @asc instead
 * of copying the command's sense data.
 */
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	uint64_t tag = iu->srp.rsp.tag;	/* must survive the memset below */

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;

	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			/*
			 * NOTE(review): sense_data_len claims
			 * SCSI_SENSE_BUFFERSIZE bytes, but send_iu() below
			 * only transfers SRP_RSP_SENSE_DATA_LEN (18) bytes
			 * past the header -- confirm clients only consume
			 * the first 18 bytes.
			 */
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			/* Synthesize fixed-format sense data. */
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}
/*
 * Hand every queued-but-not-yet-flying SRP command to the tgt core.
 *
 * The target lock cannot be held across srp_cmd_queue(), so each time a
 * command is claimed (V_FLYING test_and_set) the lock is dropped, the
 * command queued, and the list walk restarted from the top -- the list
 * may have changed while the lock was released.
 */
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct srp_rport *rport = target_to_port(target)->rport;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue,
					    (unsigned long)rport, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}
  184. static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
  185. struct srp_direct_buf *md, int nmd,
  186. enum dma_data_direction dir, unsigned int rest)
  187. {
  188. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  189. struct srp_target *target = iue->target;
  190. struct vio_port *vport = target_to_port(target);
  191. dma_addr_t token;
  192. long err;
  193. unsigned int done = 0;
  194. int i, sidx, soff;
  195. sidx = soff = 0;
  196. token = sg_dma_address(sg + sidx);
  197. for (i = 0; i < nmd && rest; i++) {
  198. unsigned int mdone, mlen;
  199. mlen = min(rest, md[i].len);
  200. for (mdone = 0; mlen;) {
  201. int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
  202. if (dir == DMA_TO_DEVICE)
  203. err = h_copy_rdma(slen,
  204. vport->riobn,
  205. md[i].va + mdone,
  206. vport->liobn,
  207. token + soff);
  208. else
  209. err = h_copy_rdma(slen,
  210. vport->liobn,
  211. token + soff,
  212. vport->riobn,
  213. md[i].va + mdone);
  214. if (err != H_SUCCESS) {
  215. eprintk("rdma error %d %d %ld\n", dir, slen, err);
  216. return -EIO;
  217. }
  218. mlen -= slen;
  219. mdone += slen;
  220. soff += slen;
  221. done += slen;
  222. if (soff == sg_dma_len(sg + sidx)) {
  223. sidx++;
  224. soff = 0;
  225. token = sg_dma_address(sg + sidx);
  226. if (sidx > nsg) {
  227. eprintk("out of sg %p %d %d\n",
  228. iue, sidx, nsg);
  229. return -EIO;
  230. }
  231. }
  232. };
  233. rest -= mlen;
  234. }
  235. return 0;
  236. }
/*
 * tgt core "transfer_response" hook: move any data for @sc between the
 * client and our scatterlist via RDMA, send the SRP_RSP (HARDWARE_ERROR
 * on transfer or command failure), then complete @sc and drop the IU.
 */
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	int err = 0;

	dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
		scsi_sg_count(sc));

	if (scsi_sg_count(sc))
		err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (err || sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);
	return 0;
}
  261. int send_adapter_info(struct iu_entry *iue,
  262. dma_addr_t remote_buffer, uint16_t length)
  263. {
  264. struct srp_target *target = iue->target;
  265. struct vio_port *vport = target_to_port(target);
  266. struct Scsi_Host *shost = target->shost;
  267. dma_addr_t data_token;
  268. struct mad_adapter_info_data *info;
  269. int err;
  270. info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
  271. GFP_KERNEL);
  272. if (!info) {
  273. eprintk("bad dma_alloc_coherent %p\n", target);
  274. return 1;
  275. }
  276. /* Get remote info */
  277. err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
  278. vport->liobn, data_token);
  279. if (err == H_SUCCESS) {
  280. dprintk("Client connect: %s (%d)\n",
  281. info->partition_name, info->partition_number);
  282. }
  283. memset(info, 0, sizeof(*info));
  284. strcpy(info->srp_version, "16.a");
  285. strncpy(info->partition_name, partition_name,
  286. sizeof(info->partition_name));
  287. info->partition_number = partition_number;
  288. info->mad_version = 1;
  289. info->os_type = 2;
  290. info->port_max_txu[0] = shost->hostt->max_sectors << 9;
  291. /* Send our info to remote */
  292. err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
  293. vport->riobn, remote_buffer);
  294. dma_free_coherent(target->dev, sizeof(*info), info, data_token);
  295. if (err != H_SUCCESS) {
  296. eprintk("Error sending adapter info %d\n", err);
  297. return 1;
  298. }
  299. return 0;
  300. }
/*
 * Answer an SRP_LOGIN_REQ: create the SRP remote port on first login,
 * then overwrite the request IU with an SRP_LOGIN_RSP granting
 * INITIAL_SRP_LIMIT request credits.
 */
static void process_login(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct srp_login_rsp *rsp = &iu->srp.login_rsp;
	uint64_t tag = iu->srp.rsp.tag;	/* must survive the memset below */
	struct Scsi_Host *shost = iue->target->shost;
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	struct srp_rport_identifiers ids;

	memset(&ids, 0, sizeof(ids));
	sprintf(ids.port_id, "%x", vport->dma_dev->unit_address);
	ids.roles = SRP_RPORT_ROLE_INITIATOR;
	if (!vport->rport)
		/* NOTE(review): the return value is not checked for error;
		 * confirm srp_rport_add() cannot return ERR_PTR here, or
		 * downstream users of vport->rport may see a bad pointer. */
		vport->rport = srp_rport_add(shost, &ids);

	/* TODO handle case that requested size is wrong and
	 * buffer format is wrong
	 */
	memset(iu, 0, sizeof(struct srp_login_rsp));
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = INITIAL_SRP_LIMIT;
	rsp->tag = tag;
	rsp->max_it_iu_len = sizeof(union srp_iu);
	rsp->max_ti_iu_len = sizeof(union srp_iu);
	/* direct and indirect */
	rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
  328. static inline void queue_cmd(struct iu_entry *iue)
  329. {
  330. struct srp_target *target = iue->target;
  331. unsigned long flags;
  332. spin_lock_irqsave(&target->lock, flags);
  333. list_add_tail(&iue->ilist, &target->cmd_queue);
  334. spin_unlock_irqrestore(&target->lock, flags);
  335. }
  336. static int process_tsk_mgmt(struct iu_entry *iue)
  337. {
  338. union viosrp_iu *iu = vio_iu(iue);
  339. int fn;
  340. dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
  341. switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
  342. case SRP_TSK_ABORT_TASK:
  343. fn = ABORT_TASK;
  344. break;
  345. case SRP_TSK_ABORT_TASK_SET:
  346. fn = ABORT_TASK_SET;
  347. break;
  348. case SRP_TSK_CLEAR_TASK_SET:
  349. fn = CLEAR_TASK_SET;
  350. break;
  351. case SRP_TSK_LUN_RESET:
  352. fn = LOGICAL_UNIT_RESET;
  353. break;
  354. case SRP_TSK_CLEAR_ACA:
  355. fn = CLEAR_ACA;
  356. break;
  357. default:
  358. fn = 0;
  359. }
  360. if (fn)
  361. scsi_tgt_tsk_mgmt_request(iue->target->shost,
  362. (unsigned long)iue->target->shost,
  363. fn,
  364. iu->srp.tsk_mgmt.task_tag,
  365. (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
  366. iue);
  367. else
  368. send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
  369. return !fn;
  370. }
  371. static int process_mad_iu(struct iu_entry *iue)
  372. {
  373. union viosrp_iu *iu = vio_iu(iue);
  374. struct viosrp_adapter_info *info;
  375. struct viosrp_host_config *conf;
  376. switch (iu->mad.empty_iu.common.type) {
  377. case VIOSRP_EMPTY_IU_TYPE:
  378. eprintk("%s\n", "Unsupported EMPTY MAD IU");
  379. break;
  380. case VIOSRP_ERROR_LOG_TYPE:
  381. eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
  382. iu->mad.error_log.common.status = 1;
  383. send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
  384. break;
  385. case VIOSRP_ADAPTER_INFO_TYPE:
  386. info = &iu->mad.adapter_info;
  387. info->common.status = send_adapter_info(iue, info->buffer,
  388. info->common.length);
  389. send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
  390. break;
  391. case VIOSRP_HOST_CONFIG_TYPE:
  392. conf = &iu->mad.host_config;
  393. conf->common.status = 1;
  394. send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
  395. break;
  396. default:
  397. eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
  398. }
  399. return 1;
  400. }
  401. static int process_srp_iu(struct iu_entry *iue)
  402. {
  403. union viosrp_iu *iu = vio_iu(iue);
  404. int done = 1;
  405. u8 opcode = iu->srp.rsp.opcode;
  406. switch (opcode) {
  407. case SRP_LOGIN_REQ:
  408. process_login(iue);
  409. break;
  410. case SRP_TSK_MGMT:
  411. done = process_tsk_mgmt(iue);
  412. break;
  413. case SRP_CMD:
  414. queue_cmd(iue);
  415. done = 0;
  416. break;
  417. case SRP_LOGIN_RSP:
  418. case SRP_I_LOGOUT:
  419. case SRP_T_LOGOUT:
  420. case SRP_RSP:
  421. case SRP_CRED_REQ:
  422. case SRP_CRED_RSP:
  423. case SRP_AER_REQ:
  424. case SRP_AER_RSP:
  425. eprintk("Unsupported type %u\n", opcode);
  426. break;
  427. default:
  428. eprintk("Unknown type %u\n", opcode);
  429. }
  430. return done;
  431. }
/*
 * Pull the IU payload referenced by a 0x80 CRQ entry into a local
 * iu_entry and dispatch it by format (MAD vs SRP).  The IU is returned
 * to the pool here unless a handler took ownership, signalled by a
 * zero "done".
 */
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err;
	int done = 1;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	/* Remember where to RDMA the response back to. */
	iue->remote_token = crq->IU_data_ptr;

	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);

	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}
  458. static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
  459. {
  460. struct srp_target *target = data;
  461. struct vio_port *vport = target_to_port(target);
  462. vio_disable_interrupts(vport->dma_dev);
  463. queue_work(vtgtd, &vport->crq_work);
  464. return IRQ_HANDLED;
  465. }
/*
 * Allocate, DMA-map and register the CRQ page with the hypervisor, hook
 * up the interrupt, and post the CRQ initialization message so the
 * client knows we are ready.
 *
 * NOTE(review): every failure path returns -ENOMEM, even for hcall or
 * request_irq() failures; callers only test for non-zero, but a more
 * specific errno would be friendlier.
 */
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
	int err;
	struct vio_port *vport = target_to_port(target);

	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(target->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(target->dev, queue->msg_token))
		goto map_failed;

	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
			PAGE_SIZE);

	/* If the adapter was left active for some reason (like kexec)
	 * try freeing and re-registering
	 */
	if (err == H_RESOURCE) {
		do {
			err = h_free_crq(vport->dma_dev->unit_address);
		} while (err == H_BUSY || H_IS_LONG_BUSY(err));

		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
				PAGE_SIZE);
	}

	/* NOTE(review): 2 is presumably H_CLOSED (partner not yet
	 * connected) -- confirm against the hcall return codes. */
	if (err != H_SUCCESS && err != 2) {
		eprintk("Error 0x%x opening virtual adapter\n", err);
		goto reg_crq_failed;
	}

	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
			  IRQF_DISABLED, "ibmvstgt", target);
	if (err)
		goto req_irq_failed;

	vio_enable_interrupts(vport->dma_dev);

	/* Tell the client the queue is up (CRQ init request message). */
	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	return 0;

req_irq_failed:
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));
reg_crq_failed:
	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long) queue->msgs);
malloc_failed:
	return -ENOMEM;
}
/*
 * Quiesce and free the CRQ: detach the interrupt first so no new work
 * arrives, deregister the queue from the hypervisor (retrying while
 * busy), then unmap and free the page.
 */
static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}
/*
 * Dispatch one CRQ entry by its "valid" byte:
 *   0xC0 -- transport initialization: format 0x01 is the client's init
 *           request (answered with a 0xC002 init response), 0x02 is the
 *           client's response to our own init message;
 *   0xFF -- transport event (ignored);
 *   0x80 -- a real SRP/MAD payload, handed to process_iu().
 */
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}
  571. static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
  572. {
  573. struct viosrp_crq *crq;
  574. unsigned long flags;
  575. spin_lock_irqsave(&queue->lock, flags);
  576. crq = &queue->msgs[queue->cur];
  577. if (crq->valid & 0x80) {
  578. if (++queue->cur == queue->size)
  579. queue->cur = 0;
  580. } else
  581. crq = NULL;
  582. spin_unlock_irqrestore(&queue->lock, flags);
  583. return crq;
  584. }
/*
 * Workqueue bottom half: drain the CRQ.  After the queue looks empty,
 * interrupts are re-enabled and the queue polled once more -- an entry
 * that slipped in between "looks empty" and "enable" would otherwise be
 * stranded until the next interrupt.  Finally any SRP commands that were
 * queued during the drain are kicked off.
 */
static void handle_crq(struct work_struct *work)
{
	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
	struct srp_target *target = vport->target;
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;	/* hand the slot back */
		}

		vio_enable_interrupts(vport->dma_dev);

		/* Close the empty-check/enable race. */
		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}
  607. static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
  608. {
  609. unsigned long flags;
  610. struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
  611. struct srp_target *target = iue->target;
  612. dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
  613. spin_lock_irqsave(&target->lock, flags);
  614. list_del(&iue->ilist);
  615. spin_unlock_irqrestore(&target->lock, flags);
  616. srp_iu_put(iue);
  617. return 0;
  618. }
  619. static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost,
  620. u64 itn_id, u64 mid, int result)
  621. {
  622. struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
  623. union viosrp_iu *iu = vio_iu(iue);
  624. unsigned char status, asc;
  625. eprintk("%p %d\n", iue, result);
  626. status = NO_SENSE;
  627. asc = 0;
  628. switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
  629. case SRP_TSK_ABORT_TASK:
  630. asc = 0x14;
  631. if (result)
  632. status = ABORTED_COMMAND;
  633. break;
  634. default:
  635. break;
  636. }
  637. send_rsp(iue, NULL, status, asc);
  638. srp_iu_put(iue);
  639. return 0;
  640. }
  641. static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id,
  642. int result)
  643. {
  644. struct srp_target *target = host_to_srp_target(shost);
  645. struct vio_port *vport = target_to_port(target);
  646. if (result) {
  647. eprintk("%p %d\n", shost, result);
  648. srp_rport_del(vport->rport);
  649. vport->rport = NULL;
  650. }
  651. return 0;
  652. }
  653. static ssize_t system_id_show(struct device *dev,
  654. struct device_attribute *attr, char *buf)
  655. {
  656. return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
  657. }
  658. static ssize_t partition_number_show(struct device *dev,
  659. struct device_attribute *attr, char *buf)
  660. {
  661. return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
  662. }
  663. static ssize_t unit_address_show(struct device *dev,
  664. struct device_attribute *attr, char *buf)
  665. {
  666. struct Scsi_Host *shost = class_to_shost(dev);
  667. struct srp_target *target = host_to_srp_target(shost);
  668. struct vio_port *vport = target_to_port(target);
  669. return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
  670. }
/* Read-only sysfs attributes exposed on the SCSI host device. */
static DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct device_attribute *ibmvstgt_attrs[] = {
	&dev_attr_system_id,
	&dev_attr_partition_number,
	&dev_attr_unit_address,
	NULL,
};
/*
 * SCSI host template: this adapter runs in target mode only; command
 * responses flow back through the transfer_response hook.
 */
static struct scsi_host_template ibmvstgt_sht = {
	.name			= TGT_NAME,
	.module			= THIS_MODULE,
	.can_queue		= INITIAL_SRP_LIMIT,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= DISABLE_CLUSTERING,
	.max_sectors		= DEFAULT_MAX_SECTORS,
	.transfer_response	= ibmvstgt_cmd_done,
	.eh_abort_handler	= ibmvstgt_eh_abort_handler,
	.shost_attrs		= ibmvstgt_attrs,
	.proc_name		= TGT_NAME,
	.supported_mode		= MODE_TARGET,
};
/*
 * Bind to one v-scsi-host VIO device: allocate port and host structures,
 * read the DMA window numbers from the device tree, register with the
 * SCSI and tgt layers, and finally bring up the CRQ.  Failure paths
 * unwind in reverse order via the goto ladder.
 */
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	shost->transportt = ibmvstgt_transport_template;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	vport->target = target;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	/* NOTE(review): layout presumed from the 40-byte (10-cell) size
	 * check -- two windows of 5 cells each, our liobn at dma[0] and
	 * the remote's at dma[5]; confirm against the PAPR binding. */
	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq);

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto free_srp_target;

	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto remove_host;

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_queue;

	return 0;
free_queue:
	scsi_tgt_free_queue(shost);
remove_host:
	scsi_remove_host(shost);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}
/*
 * Teardown mirror of ibmvstgt_probe(): quiesce the CRQ first so no new
 * requests arrive, then unwind the host/target registration and free
 * the port memory.
 */
static int ibmvstgt_remove(struct vio_dev *dev)
{
	/* drvdata presumably set for us by srp_target_alloc() -- confirm */
	struct srp_target *target = dev_get_drvdata(&dev->dev);
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	srp_remove_host(shost);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}
/* VIO devices this driver binds to (matched against the device tree). */
static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
	{"v-scsi-host", "IBM,v-scsi-host"},
	{"",""}
};

MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
static struct vio_driver ibmvstgt_driver = {
	.id_table = ibmvstgt_device_table,
	.probe = ibmvstgt_probe,
	.remove = ibmvstgt_remove,
	/* NOTE(review): driver name "ibmvscsis" differs from TGT_NAME
	 * ("ibmvstgt") -- presumably intentional for tooling/sysfs
	 * compatibility; confirm before unifying. */
	.name = "ibmvscsis",
};
  773. static int get_system_info(void)
  774. {
  775. struct device_node *rootdn;
  776. const char *id, *model, *name;
  777. const unsigned int *num;
  778. rootdn = of_find_node_by_path("/");
  779. if (!rootdn)
  780. return -ENOENT;
  781. model = of_get_property(rootdn, "model", NULL);
  782. id = of_get_property(rootdn, "system-id", NULL);
  783. if (model && id)
  784. snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
  785. name = of_get_property(rootdn, "ibm,partition-name", NULL);
  786. if (name)
  787. strncpy(partition_name, name, sizeof(partition_name));
  788. num = of_get_property(rootdn, "ibm,partition-no", NULL);
  789. if (num)
  790. partition_number = *num;
  791. of_node_put(rootdn);
  792. return 0;
  793. }
/* Callbacks the SRP transport class invokes for tgt completions. */
static struct srp_function_template ibmvstgt_transport_functions = {
	.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
	.it_nexus_response = ibmvstgt_it_nexus_response,
};
/*
 * Module init: attach the SRP transport class, create the CRQ workqueue,
 * cache system identity from the device tree, then register the VIO
 * driver.  Failure paths unwind in reverse order.
 */
static int __init ibmvstgt_init(void)
{
	int err = -ENOMEM;

	printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");

	ibmvstgt_transport_template =
		srp_attach_transport(&ibmvstgt_transport_functions);
	if (!ibmvstgt_transport_template)
		return err;

	vtgtd = create_workqueue("ibmvtgtd");
	if (!vtgtd)
		goto release_transport;

	err = get_system_info();
	if (err)
		goto destroy_wq;

	err = vio_register_driver(&ibmvstgt_driver);
	if (err)
		goto destroy_wq;

	return 0;
destroy_wq:
	destroy_workqueue(vtgtd);
release_transport:
	srp_release_transport(ibmvstgt_transport_template);
	return err;
}
  822. static void __exit ibmvstgt_exit(void)
  823. {
  824. printk("Unregister IBM virtual SCSI driver\n");
  825. destroy_workqueue(vtgtd);
  826. vio_unregister_driver(&ibmvstgt_driver);
  827. srp_release_transport(ibmvstgt_transport_template);
  828. }
/* Module metadata and entry points. */
MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");

module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);