
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
    sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
    const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505d,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
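    /*
     * Bounded polling: service at most 50 interrupt events per
     * invocation so a wedged RISC cannot trap us in the handler;
     * bail out early on PCI disconnect or when the RISC goes idle.
     */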
    for (iter = 50; iter--; ) {
        hccr = RD_REG_WORD(&reg->hccr);
        if (qla2x00_check_reg16_for_disconnect(vha, hccr))
            break;
        if (hccr & HCCR_RISC_PAUSE) {
            if (pci_channel_offline(ha->pdev))
                break;

            /*
             * Issue a "HARD" reset in order for the RISC interrupt
             * bit to be cleared. Schedule a big hammer to get
             * out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
            break;
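        /*
         * The semaphore bit tells the interrupt source apart: when
         * set, the RISC has posted mailbox/asynchronous-event data;
         * when clear, response-queue entries are pending.
         */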
        if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);

            /* Get mailbox data. */
            mb[0] = RD_MAILBOX_REG(ha, reg, 0);
            if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                qla2x00_mbx_completion(vha, mb[0]);
                status |= MBX_INTERRUPT;
            } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                qla2x00_async_event(vha, rsp, mb);
            } else {
                /*EMPTY*/
                ql_dbg(ql_dbg_async, vha, 0x5025,
                    "Unrecognized interrupt type (%d).\n",
                    mb[0]);
            }
            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            RD_REG_WORD(&reg->semaphore);
        } else {
            qla2x00_process_response_queue(rsp);

            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);
        }
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
    /* Check for PCI disconnection */
    if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
        if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
            !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
            !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
            /*
             * Schedule this (only once) on the default system
             * workqueue so that all the adapter workqueues and the
             * DPC thread can be shutdown cleanly.
             */
            schedule_work(&vha->hw->board_disable);
        }
        return true;
    } else
        return false;
}
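/*
 * A dead PCI link returns all-ones on reads. For 16-bit registers the
 * wrapper below ORs in 0xffff0000 so that the 0xffff "disconnected"
 * pattern maps onto the 32-bit 0xffffffff check above.
 */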
bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
    return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    struct qla_hw_data *ha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x5058,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
        if (qla2x00_check_reg32_for_disconnect(vha, stat))
            break;
        if (stat & HSR_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_WORD(&reg->hccr);

            if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                ql_log(ql_log_warn, vha, 0x5026,
                    "Parity error -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);
            else
                ql_log(ql_log_warn, vha, 0x5027,
                    "RISC paused -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);

            /*
             * Issue a "HARD" reset in order for the RISC
             * interrupt bit to be cleared. Schedule a big
             * hammer to get out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSR_RISC_INT) == 0)
            break;
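        /*
         * On these ISPs the low byte of host_status encodes the
         * event type and the upper word (MSW) carries mailbox 0, so
         * most events can be decoded without extra register reads.
         */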
        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla2x00_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_MAILBOX_REG(ha, reg, 1);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            mb[3] = RD_MAILBOX_REG(ha, reg, 3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
            qla2x00_process_response_queue(rsp);
            break;
        case 0x15:
            mb[0] = MBA_CMPLT_1_16BIT;
            mb[1] = MSW(stat);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x16:
            mb[0] = MBA_SCSI_COMPLETION;
            mb[1] = MSW(stat);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            qla2x00_async_event(vha, rsp, mb);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x5028,
                "Unrecognized interrupt type (%d).\n", stat & 0xff);
            break;
        }
        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
        RD_REG_WORD_RELAXED(&reg->hccr);
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint32_t mboxes;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    /* Read all mbox registers? */
    WARN_ON_ONCE(ha->mbx_count > 32);
    mboxes = (1ULL << ha->mbx_count) - 1;
    if (!ha->mcp)
        ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
    else
        mboxes = ha->mcp->in_mb;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    mboxes >>= 1;

    wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
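    /*
     * Walk the remaining mailboxes, copying only those the caller
     * asked for (the in_mb bitmask). Mailboxes 4 and 5 are read via
     * qla2x00_debounce_register() to get a stable value, and the
     * ISP2200 keeps mailboxes 8 and up at a different register
     * offset, so the pointer is re-seeded at cnt == 8.
     */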
    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (IS_QLA2200(ha) && cnt == 8)
            wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
        if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
            ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
        else if (mboxes & BIT_0)
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

        wptr++;
        mboxes >>= 1;
    }
}
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
    static char *event[] =
        { "Complete", "Request Notification", "Time Extension" };
    int rval;
    struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
    uint16_t __iomem *wptr;
    uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

    /* Seed data -- mailbox1 -> mailbox7. */
    if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
        wptr = (uint16_t __iomem *)&reg24->mailbox1;
    else if (IS_QLA8044(vha->hw))
        wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
    else
        return;

    for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
        mb[cnt] = RD_REG_WORD(wptr);

    ql_dbg(ql_dbg_async, vha, 0x5021,
        "Inter-Driver Communication %s -- "
        "%04x %04x %04x %04x %04x %04x %04x.\n",
        event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
        mb[4], mb[5], mb[6]);

    switch (aen) {
    /* Handle IDC Error completion case. */
    case MBA_IDC_COMPLETE:
        if (mb[1] >> 15) {
            vha->hw->flags.idc_compl_status = 1;
            if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
                complete(&vha->hw->dcbx_comp);
        }
        break;

    case MBA_IDC_NOTIFY:
        /* Acknowledgement needed? [Notify && non-zero timeout]. */
        timeout = (descr >> 8) & 0xf;
        ql_dbg(ql_dbg_async, vha, 0x5022,
            "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
            vha->host_no, event[aen & 0xff], timeout);

        if (!timeout)
            return;
        rval = qla2x00_post_idc_ack_work(vha, mb);
        if (rval != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x5023,
                "IDC failed to post ACK.\n");
        break;
    case MBA_IDC_TIME_EXT:
        vha->hw->idc_extend_tmo = descr;
        ql_dbg(ql_dbg_async, vha, 0x5087,
            "%lu Inter-Driver Communication %s -- "
            "Extend timeout by=%d.\n",
            vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
        break;
    }
}
#define LS_UNKNOWN    2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
    static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "32", "10"
    };
#define QLA_LAST_SPEED 7
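    /*
     * Index is the firmware port-speed code (strings in Gb/s, index
     * LS_UNKNOWN is "?"); the out-of-range code 0x13 maps to the
     * 10 Gb/s entry at QLA_LAST_SPEED.
     */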
    if (IS_QLA2100(ha) || IS_QLA2200(ha))
        return link_speeds[0];
    else if (speed == 0x13)
        return link_speeds[QLA_LAST_SPEED];
    else if (speed < QLA_LAST_SPEED)
        return link_speeds[speed];
    else
        return link_speeds[LS_UNKNOWN];
}
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
    struct qla_hw_data *ha = vha->hw;

    /*
     * 8200 AEN Interpretation:
     * mb[0] = AEN code
     * mb[1] = AEN Reason code
     * mb[2] = LSW of Peg-Halt Status-1 Register
     * mb[6] = MSW of Peg-Halt Status-1 Register
     * mb[3] = LSW of Peg-Halt Status-2 register
     * mb[7] = MSW of Peg-Halt Status-2 register
     * mb[4] = IDC Device-State Register value
     * mb[5] = IDC Driver-Presence Register value
     */
    ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
        "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
        mb[0], mb[1], mb[2], mb[6]);
    ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
        "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
        "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

    if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
        IDC_HEARTBEAT_FAILURE)) {
        ha->flags.nic_core_hung = 1;
        ql_log(ql_log_warn, vha, 0x5060,
            "83XX: F/W Error Reported: Check if reset required.\n");

        if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
            uint32_t protocol_engine_id, fw_err_code, err_level;

            /*
             * IDC_PEG_HALT_STATUS_CHANGE interpretation:
             * - PEG-Halt Status-1 Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = protocol-engine ID
             *   Bits 8-28  = f/w error code
             *   Bits 29-31 = Error-level
             *     Error-level 0x1 = Non-Fatal error
             *     Error-level 0x2 = Recoverable Fatal error
             *     Error-level 0x4 = UnRecoverable Fatal error
             * - PEG-Halt Status-2 Register:
             *   (LSW = mb[3], MSW = mb[7])
             */
            protocol_engine_id = (mb[2] & 0xff);
            fw_err_code = (((mb[2] & 0xff00) >> 8) |
                ((mb[6] & 0x1fff) << 8));
            err_level = ((mb[6] & 0xe000) >> 13);
            ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
                "Register: protocol_engine_id=0x%x "
                "fw_err_code=0x%x err_level=0x%x.\n",
                protocol_engine_id, fw_err_code, err_level);
            ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
                "Register: 0x%x%x.\n", mb[7], mb[3]);
            if (err_level == ERR_LEVEL_NON_FATAL) {
                ql_log(ql_log_warn, vha, 0x5063,
                    "Not a fatal error, f/w has recovered "
                    "itself.\n");
            } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5064,
                    "Recoverable Fatal error: Chip reset "
                    "required.\n");
                qla83xx_schedule_work(vha,
                    QLA83XX_NIC_CORE_RESET);
            } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5065,
                    "Unrecoverable Fatal error: Set FAILED "
                    "state, reboot required.\n");
                qla83xx_schedule_work(vha,
                    QLA83XX_NIC_CORE_UNRECOVERABLE);
            }
        }
        if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
            uint16_t peg_fw_state, nw_interface_link_up;
            uint16_t nw_interface_signal_detect, sfp_status;
            uint16_t htbt_counter, htbt_monitor_enable;
            uint16_t sfp_additional_info, sfp_multirate;
            uint16_t sfp_tx_fault, link_speed, dcbx_status;

            /*
             * IDC_NIC_FW_REPORTED_FAILURE interpretation:
             * - PEG-to-FC Status Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = Peg-Firmware state
             *   Bit 8      = N/W Interface Link-up
             *   Bit 9      = N/W Interface signal detected
             *   Bits 10-11 = SFP Status
             *     SFP Status 0x0 = SFP+ transceiver not expected
             *     SFP Status 0x1 = SFP+ transceiver not present
             *     SFP Status 0x2 = SFP+ transceiver invalid
             *     SFP Status 0x3 = SFP+ transceiver present and valid
             *   Bits 12-14 = Heartbeat Counter
             *   Bit 15     = Heartbeat Monitor Enable
             *   Bits 16-17 = SFP Additional Info
             *     SFP info 0x0 = Unrecognized transceiver for Ethernet
             *     SFP info 0x1 = SFP+ brand validation failed
             *     SFP info 0x2 = SFP+ speed validation failed
             *     SFP info 0x3 = SFP+ access error
             *   Bit 18     = SFP Multirate
             *   Bit 19     = SFP Tx Fault
             *   Bits 20-22 = Link Speed
             *   Bits 23-27 = Reserved
             *   Bits 28-30 = DCBX Status
             *     DCBX Status 0x0 = DCBX Disabled
             *     DCBX Status 0x1 = DCBX Enabled
             *     DCBX Status 0x2 = DCBX Exchange error
             *   Bit 31     = Reserved
             */
            peg_fw_state = (mb[2] & 0x00ff);
            nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
            nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
            sfp_status = ((mb[2] & 0x0c00) >> 10);
            htbt_counter = ((mb[2] & 0x7000) >> 12);
            htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
            sfp_additional_info = (mb[6] & 0x0003);
            sfp_multirate = ((mb[6] & 0x0004) >> 2);
            sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
            link_speed = ((mb[6] & 0x0070) >> 4);
            dcbx_status = ((mb[6] & 0x7000) >> 12);

            ql_log(ql_log_warn, vha, 0x5066,
                "Peg-to-Fc Status Register:\n"
                "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
                "nw_interface_signal_detect=0x%x"
                "\nsfp_status=0x%x.\n ", peg_fw_state,
                nw_interface_link_up, nw_interface_signal_detect,
                sfp_status);
            ql_log(ql_log_warn, vha, 0x5067,
                "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
                "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
                htbt_counter, htbt_monitor_enable,
                sfp_additional_info, sfp_multirate);
            ql_log(ql_log_warn, vha, 0x5068,
                "sfp_tx_fault=0x%x, link_speed=0x%x, "
                "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
                dcbx_status);

            qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
        }
        if (mb[1] & IDC_HEARTBEAT_FAILURE) {
            ql_log(ql_log_warn, vha, 0x5069,
                "Heartbeat Failure encountered, chip reset "
                "required.\n");

            qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
        }
    }

    if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
        ql_log(ql_log_info, vha, 0x506a,
            "IDC Device-State changed = 0x%x.\n", mb[4]);
        if (ha->flags.nic_core_reset_owner)
            return;
        qla83xx_schedule_work(vha, MBA_IDC_AEN);
    }
}
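/*
 * Report whether an RSCN destination ID belongs to one of our own
 * virtual ports; such RSCNs are skipped by the caller. The vport
 * list is walked under vport_slock since vports can be created and
 * torn down concurrently.
 */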
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *vp;
    uint32_t vp_did;
    unsigned long flags;
    int ret = 0;

    if (!ha->num_vhosts)
        return ret;

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry(vp, &ha->vp_list, list) {
        vp_did = vp->d_id.b24;
        if (vp_did == rscn_entry) {
            ret = 1;
            break;
        }
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    return ret;
}
static inline fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
    fc_port_t *fcport;

    list_for_each_entry(fcport, &vha->vp_fcports, list)
        if (fcport->loop_id == loop_id)
            return fcport;
    return NULL;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
    uint16_t handle_cnt;
    uint16_t cnt, mbx;
    uint32_t handles[5];
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
    uint32_t rscn_entry, host_pid;
    unsigned long flags;
    fc_port_t *fcport = NULL;

    /* Setup to process RIO completion. */
    handle_cnt = 0;
    if (IS_CNA_CAPABLE(ha))
        goto skip_rio;
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handle_cnt = 1;
        break;
    case MBA_CMPLT_1_16BIT:
        handles[0] = mb[1];
        handle_cnt = 1;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_3_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handle_cnt = 3;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_4_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handle_cnt = 4;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_5_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
        handle_cnt = 5;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_32BIT:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handles[1] = le32_to_cpu(
            ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
            RD_MAILBOX_REG(ha, reg, 6));
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    default:
        break;
    }
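    /*
     * RIO (Reduced Interrupt Operation) batches several command
     * completions into one event; the switch above collected the
     * 16- or 32-bit handles and normalized mb[0] to
     * MBA_SCSI_COMPLETION so they all funnel through one path below.
     */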
skip_rio:
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:    /* Fast Post */
        if (!vha->flags.online)
            break;

        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                handles[cnt]);
        break;

    case MBA_RESET:            /* Reset */
        ql_dbg(ql_dbg_async, vha, 0x5002,
            "Asynchronous RESET.\n");

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
        break;

    case MBA_SYSTEM_ERR:        /* System Error */
        mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
            RD_REG_WORD(&reg24->mailbox7) : 0;
        ql_log(ql_log_warn, vha, 0x5003,
            "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
            "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

        ha->isp_ops->fw_dump(vha, 1);

        if (IS_FWI2_CAPABLE(ha)) {
            if (mb[1] == 0 && mb[2] == 0) {
                ql_log(ql_log_fatal, vha, 0x5004,
                    "Unrecoverable Hardware Error: adapter "
                    "marked OFFLINE!\n");
                vha->flags.online = 0;
                vha->device_flags |= DFLG_DEV_FAILED;
            } else {
                /* Check to see if MPI timeout occurred */
                if ((mbx & MBX_3) && (ha->port_no == 0))
                    set_bit(MPI_RESET_NEEDED,
                        &vha->dpc_flags);

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            }
        } else if (mb[1] == 0) {
            ql_log(ql_log_fatal, vha, 0x5005,
                "Unrecoverable Hardware Error: adapter marked "
                "OFFLINE!\n");
            vha->flags.online = 0;
            vha->device_flags |= DFLG_DEV_FAILED;
        } else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_REQ_TRANSFER_ERR:    /* Request Transfer Error */
        ql_log(ql_log_warn, vha, 0x5006,
            "ISP Request Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_RSP_TRANSFER_ERR:    /* Response Transfer Error */
        ql_log(ql_log_warn, vha, 0x5007,
            "ISP Response Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_WAKEUP_THRES:        /* Request Queue Wake-up */
        ql_dbg(ql_dbg_async, vha, 0x5008,
            "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
        break;

    case MBA_LOOP_INIT_ERR:
        ql_log(ql_log_warn, vha, 0x5090,
            "LOOP INIT ERROR (%x).\n", mb[1]);
        ha->isp_ops->fw_dump(vha, 1);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_LIP_OCCURRED:        /* Loop Initialization Procedure */
        ql_dbg(ql_dbg_async, vha, 0x5009,
            "LIP occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
        break;

    case MBA_LOOP_UP:        /* Loop Up Event */
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
            ha->link_data_rate = PORT_SPEED_1GB;
        else
            ha->link_data_rate = mb[1];

        ql_log(ql_log_info, vha, 0x500a,
            "LOOP UP detected (%s Gbps).\n",
            qla2x00_get_link_speed_str(ha, ha->link_data_rate));

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
        break;

    case MBA_LOOP_DOWN:        /* Loop Down Event */
        mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
            ? RD_REG_WORD(&reg24->mailbox4) : 0;
        mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
            : mbx;
        ql_log(ql_log_info, vha, 0x500b,
            "LOOP DOWN detected (%x %x %x %x).\n",
            mb[1], mb[2], mb[3], mbx);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            /*
             * In case of loop down, restore WWPN from
             * NVRAM in case of FA-WWPN capable ISP
             * Restore for Physical Port only
             */
            if (!vha->vp_idx) {
                if (ha->flags.fawwpn_enabled) {
                    void *wwpn = ha->init_cb->port_name;
                    memcpy(vha->port_name, wwpn, WWN_SIZE);
                    fc_host_port_name(vha->host) =
                        wwn_to_u64(vha->port_name);
                    ql_dbg(ql_dbg_init + ql_dbg_verbose,
                        vha, 0x0144, "LOOP DOWN detected,"
                        "restore WWPN %016llx\n",
                        wwn_to_u64(vha->port_name));
                }

                clear_bit(VP_CONFIG_OK, &vha->vp_flags);
            }

            vha->device_flags |= DFLG_NO_CABLE;
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        vha->flags.management_server_logged_in = 0;
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
        break;

    case MBA_LIP_RESET:        /* LIP reset occurred */
        ql_dbg(ql_dbg_async, vha, 0x500c,
            "LIP reset occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        ha->operating_mode = LOOP;
        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
        break;

    /* case MBA_DCBX_COMPLETE: */
    case MBA_POINT_TO_POINT:    /* Point-to-Point */
        if (IS_QLA2100(ha))
            break;

        if (IS_CNA_CAPABLE(ha)) {
            ql_dbg(ql_dbg_async, vha, 0x500d,
                "DCBX Completed -- %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
            if (ha->notify_dcbx_comp && !vha->vp_idx)
                complete(&ha->dcbx_comp);
        } else
            ql_dbg(ql_dbg_async, vha, 0x500e,
                "Asynchronous P2P MODE received.\n");

        /*
         * Until there's a transition from loop down to loop up, treat
         * this as loop down only.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
            set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        ha->flags.gpsc_supported = 1;
        vha->flags.management_server_logged_in = 0;
        break;

    case MBA_CHG_IN_CONNECTION:    /* Change in connection mode */
        if (IS_QLA2100(ha))
            break;

        ql_dbg(ql_dbg_async, vha, 0x500f,
            "Configuration change detected: value=%x.\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_PORT_UPDATE:        /* Port database update */
        /*
         * Handle only global and vn-port update events
         *
         * Relevant inputs:
         * mb[1] = N_Port handle of changed port
         *         OR 0xffff for global event
         * mb[2] = New login state
         *         7 = Port logged out
         * mb[3] = LSB is vp_idx, 0xff = all vps
         *
         * Skip processing if:
         *   Event is global, vp_idx is NOT all vps,
         *   vp_idx does not match
         *   Event is not global, vp_idx does not match
         */
        if (IS_QLA2XXX_MIDTYPE(ha) &&
            ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
            (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
            break;

        if (mb[2] == 0x7) {
            ql_dbg(ql_dbg_async, vha, 0x5010,
                "Port %s %04x %04x %04x.\n",
                mb[1] == 0xffff ? "unavailable" : "logout",
                mb[1], mb[2], mb[3]);

            if (mb[1] == 0xffff)
                goto global_port_update;

            /* Port logout */
            fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
            if (!fcport)
                break;
            if (atomic_read(&fcport->state) != FCS_ONLINE)
                break;
            ql_dbg(ql_dbg_async, vha, 0x508a,
                "Marking port lost loopid=%04x portid=%06x.\n",
                fcport->loop_id, fcport->d_id.b24);
            qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
            break;

global_port_update:
            if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
                vha->device_flags |= DFLG_NO_CABLE;
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            if (vha->vp_idx) {
                atomic_set(&vha->vp_state, VP_FAILED);
                fc_vport_set_state(vha->fc_vport,
                    FC_VPORT_FAILED);
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            vha->flags.management_server_logged_in = 0;
            ha->link_data_rate = PORT_SPEED_UNKNOWN;
            break;
        }

        /*
         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
         * event etc. earlier indicating loop is down) then process
         * it. Otherwise ignore it and wait for RSCN to come in.
         */
        atomic_set(&vha->loop_down_timer, 0);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
            atomic_read(&vha->loop_state) != LOOP_DEAD) {
            ql_dbg(ql_dbg_async, vha, 0x5011,
                "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                mb[1], mb[2], mb[3]);

            qlt_async_event(mb[0], vha, mb);
            break;
        }

        ql_dbg(ql_dbg_async, vha, 0x5012,
            "Port database changed %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        /*
         * Mark all devices as missing so we will login again.
         */
        atomic_set(&vha->loop_state, LOOP_UP);

        qla2x00_mark_all_devices_lost(vha, 1);

        if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
            set_bit(SCR_PENDING, &vha->dpc_flags);

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        set_bit(VP_CONFIG_OK, &vha->vp_flags);

        qlt_async_event(mb[0], vha, mb);
        break;

    case MBA_RSCN_UPDATE:        /* State Change Registration */
        /* Check if the Vport has issued a SCR */
        if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
            break;
        /* Only handle SCNs for our Vport index. */
        if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
            break;

        ql_dbg(ql_dbg_async, vha, 0x5013,
            "RSCN database changed -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
        host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
            | vha->d_id.b.al_pa;
        if (rscn_entry == host_pid) {
            ql_dbg(ql_dbg_async, vha, 0x5014,
                "Ignoring RSCN update to local host "
                "port ID (%06x).\n", host_pid);
            break;
        }

        /* Ignore reserved bits from RSCN-payload. */
        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

        /* Skip RSCNs for virtual ports on the same physical port */
        if (qla2x00_is_a_vp_did(vha, rscn_entry))
            break;

        /*
         * Search for the rport related to this RSCN entry and mark it
         * as lost.
         */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (atomic_read(&fcport->state) != FCS_ONLINE)
                continue;
            if (fcport->d_id.b24 == rscn_entry) {
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                break;
            }
        }

        atomic_set(&vha->loop_down_timer, 0);
        vha->flags.management_server_logged_in = 0;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);
        qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
        break;

    /* case MBA_RIO_RESPONSE: */
    case MBA_ZIO_RESPONSE:
        ql_dbg(ql_dbg_async, vha, 0x5015,
            "[R|Z]IO update completion.\n");

        if (IS_FWI2_CAPABLE(ha))
            qla24xx_process_response_queue(vha, rsp);
        else
            qla2x00_process_response_queue(rsp);
        break;

    case MBA_DISCARD_RND_FRAME:
        ql_dbg(ql_dbg_async, vha, 0x5016,
            "Discard RND Frame -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_TRACE_NOTIFICATION:
        ql_dbg(ql_dbg_async, vha, 0x5017,
            "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
        break;

    case MBA_ISP84XX_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x5018,
            "ISP84XX Alert Notification -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
        switch (mb[1]) {
        case A84_PANIC_RECOVERY:
            ql_log(ql_log_info, vha, 0x5019,
                "Alert 84XX: panic recovery %04x %04x.\n",
                mb[2], mb[3]);
            break;
        case A84_OP_LOGIN_COMPLETE:
            ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501a,
                "Alert 84XX: firmware version %x.\n",
                ha->cs84xx->op_fw_version);
            break;
        case A84_DIAG_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501b,
                "Alert 84XX: diagnostic firmware version %x.\n",
                ha->cs84xx->diag_fw_version);
            break;
        case A84_GOLD_LOGIN_COMPLETE:
            ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
            ha->cs84xx->fw_update = 1;
            ql_log(ql_log_info, vha, 0x501c,
                "Alert 84XX: gold firmware version %x.\n",
                ha->cs84xx->gold_fw_version);
            break;
        default:
            ql_log(ql_log_warn, vha, 0x501d,
                "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
        }
        spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
        break;

    case MBA_DCBX_START:
        ql_dbg(ql_dbg_async, vha, 0x501e,
            "DCBX Started -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_DCBX_PARAM_UPDATE:
        ql_dbg(ql_dbg_async, vha, 0x501f,
            "DCBX Parameters Updated -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_FCF_CONF_ERR:
        ql_dbg(ql_dbg_async, vha, 0x5020,
            "FCF Configuration Error -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_IDC_NOTIFY:
        if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
            mb[4] = RD_REG_WORD(&reg24->mailbox4);
            if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
                (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
                (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
                set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
                /*
                 * Extend loop down timer since port is active.
                 */
                if (atomic_read(&vha->loop_state) == LOOP_DOWN)
                    atomic_set(&vha->loop_down_timer,
                        LOOP_DOWN_TIME);
                qla2xxx_wake_dpc(vha);
            }
        }
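        /* Fallthru */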
    case MBA_IDC_COMPLETE:
        if (ha->notify_lb_portup_comp && !vha->vp_idx)
            complete(&ha->lb_portup_comp);
        /* Fallthru */
    case MBA_IDC_TIME_EXT:
        if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
            IS_QLA8044(ha))
            qla81xx_idc_event(vha, mb[0], mb[1]);
        break;

    case MBA_IDC_AEN:
        mb[4] = RD_REG_WORD(&reg24->mailbox4);
        mb[5] = RD_REG_WORD(&reg24->mailbox5);
        mb[6] = RD_REG_WORD(&reg24->mailbox6);
        mb[7] = RD_REG_WORD(&reg24->mailbox7);
        qla83xx_handle_8200_aen(vha, mb);
        break;

    case MBA_DPORT_DIAGNOSTICS:
        ql_dbg(ql_dbg_async, vha, 0x5052,
            "D-Port Diagnostics: %04x result=%s\n",
            mb[0],
            mb[1] == 0 ? "start" :
            mb[1] == 1 ? "done (pass)" :
            mb[1] == 2 ? "done (error)" : "other");
        break;

    case MBA_TEMPERATURE_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x505e,
            "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
        if (mb[1] == 0x12)
            schedule_work(&ha->board_disable);
        break;

    default:
        ql_dbg(ql_dbg_async, vha, 0x5057,
            "Unknown AEN:%04x %04x %04x %04x\n",
            mb[0], mb[1], mb[2], mb[3]);
    }

    qlt_async_event(mb[0], vha, mb);

    if (!vha->vp_idx && ha->num_vhosts)
        qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;

    /* Validate handle. */
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x3014,
            "Invalid SCSI command index (%x).\n", index);

        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;

        /* Save ISP completion status */
        sp->done(ha, sp, DID_OK << 16);
    } else {
        ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    }
}
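/*
 * Translate an IOCB completion handle back into its srb_t: validate
 * the index, guard against stale or mismatched handles (e.g. a
 * command that has already timed out), and clear the
 * outstanding-command slot so the handle can be reused.
 */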
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    sts_entry_t *pkt = iocb;
    srb_t *sp = NULL;
    uint16_t index;

    index = LSW(pkt->handle);
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x5031,
            "Invalid command index (%x).\n", index);
        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        goto done;
    }
    sp = req->outstanding_cmds[index];
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x5032,
            "Invalid completion handle (%x) -- timed-out.\n", index);
        return sp;
    }
    if (sp->handle != index) {
        ql_log(ql_log_warn, vha, 0x5033,
            "SRB handle (%x) mismatch %x.\n", sp->handle, index);
        return NULL;
    }

    req->outstanding_cmds[index] = NULL;

done:
    return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
    const char func[] = "MBX-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *lio;
    uint16_t *data;
    uint16_t status;

    sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
    if (!sp)
        return;

    lio = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    data = lio->u.logio.data;

    data[0] = MBS_COMMAND_ERROR;
    data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
        QLA_LOGIO_LOGIN_RETRIED : 0;
    if (mbx->entry_status) {
        ql_dbg(ql_dbg_async, vha, 0x5043,
            "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
            "entry-status=%x status=%x state-flag=%x "
            "status-flags=%x.\n", type, sp->handle,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mbx->entry_status,
            le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
            le16_to_cpu(mbx->status_flags));

        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
            (uint8_t *)mbx, sizeof(*mbx));

        goto logio_done;
    }

    status = le16_to_cpu(mbx->status);
    if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
        le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
        status = 0;
    if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
        ql_dbg(ql_dbg_async, vha, 0x5045,
            "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
            type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(mbx->mb1));

        data[0] = MBS_COMMAND_COMPLETE;
        if (sp->type == SRB_LOGIN_CMD) {
            fcport->port_type = FCT_TARGET;
            if (le16_to_cpu(mbx->mb1) & BIT_0)
                fcport->port_type = FCT_INITIATOR;
            else if (le16_to_cpu(mbx->mb1) & BIT_1)
                fcport->flags |= FCF_FCP2_DEVICE;
        }
        goto logio_done;
    }

    data[0] = le16_to_cpu(mbx->mb0);
    switch (data[0]) {
    case MBS_PORT_ID_USED:
        data[1] = le16_to_cpu(mbx->mb1);
        break;
    case MBS_LOOP_ID_USED:
        break;
    default:
        data[0] = MBS_COMMAND_ERROR;
        break;
    }

    ql_log(ql_log_warn, vha, 0x5046,
        "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
        "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
        status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
        le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
        le16_to_cpu(mbx->mb7));

logio_done:
    sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/*
	 * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * FC payload to the caller.
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte=0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

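/**
 * qla24xx_els_ct_entry() - Process an ELS/CT pass-through IOCB entry.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @pkt: Entry pointer
 * @iocb_type: IOCB type (ELS_IOCB_TYPE or CT_IOCB_TYPE)
 *
 * On error, the firmware status words (completion status and error
 * subcodes) are copied after the fc_bsg_reply for the caller to inspect.
 */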
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(vha, sp, 0);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/*
	 * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * FC payload to the caller.
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		/* Copy the firmware status words after the fc_bsg_reply. */
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));

		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

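/**
 * qla24xx_logio_entry() - Process a Login/Logout IOCB entry.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @logio: Entry pointer
 *
 * Translates the IOCB completion into mailbox-style status codes in
 * lio->u.logio.data[] and, on a successful login, updates the fcport's
 * port type, FCP-2 flag and supported classes of service.
 */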
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}

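/**
 * qla24xx_tm_iocb_entry() - Process a Task Management IOCB entry.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @tsk: Entry pointer
 *
 * Sets iocb->u.tmf.data to QLA_SUCCESS or QLA_FUNCTION_FAILED based on
 * the entry status, completion status and FCP response data.
 */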
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, le32_to_cpu(sts->rsp_data_len));
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(vha, sp, 0);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

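/**
 * qla2x00_handle_sense() - Copy sense data into the SCSI command.
 * @sp: SRB structure
 * @sense_data: sense data as reported by the ISP
 * @par_sense_len: sense bytes available in this status entry
 * @sense_len: total sense length reported by the firmware
 * @rsp: response queue
 * @res: SCSI result to post once all sense data has arrived
 *
 * If the sense data does not fit in the status entry, the remainder is
 * delivered via status continuation entries; rsp->status_srb is set so
 * qla2x00_status_cont_entry() can finish the copy.
 */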
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected an error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t *ap = &sts24->data[12];
	uint8_t *ep = &sts24->data[20];
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make the guard field appear at offset 2.
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/*
	 * The SAM status lives unshifted in the low byte of cmd->result;
	 * the host and driver bytes are set via their accessors.
	 */
	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}

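/**
 * qla25xx_process_bidir_status_iocb() - Process a bidirectional command status IOCB.
 * @vha: SCSI driver HA context
 * @pkt: Entry pointer
 * @req: request queue the IOCB was issued on
 * @index: outstanding command handle
 *
 * Maps the firmware completion status onto an EXT_STATUS_* code and
 * returns it in the vendor-specific portion of the bsg reply.
 */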
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct fc_bsg_job *bsg_job = NULL;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;
		bsg_job = sp->u.bsg_job;
	} else {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_job->reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data overrun and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data underrun and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;

	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_job->reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/*
	 * Always return DID_OK; bsg will send the vendor specific response
	 * in this case only.
	 */
	sp->done(vha, sp, (DID_OK << 16));
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);
		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
		/* Valid values of the retry delay timer are 0x1-0xffef */
		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
			retry_delay = sts24->retry_delay;
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check the retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);

	/*
	 * Based on the host and SCSI status, generate a status code for Linux.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of TASK SET FULL or BUSY means the
			 * task was not completed; anything else here is a
			 * dropped frame.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if the SCSI status is nonzero. If so, report it.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port to be marked lost on fcport=%02x%02x%02x, current "
		    "port state= %s.\n", fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    port_state_str[atomic_read(&fcport->state)]);

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if (pkt->entry_type == NOTIFY_ACK_TYPE &&
	    pkt->handle == QLA_TGT_SKIP_HANDLE)
		return;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

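/**
 * qla24xx_abort_iocb_entry() - Process an Abort IOCB completion.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @pkt: Entry pointer
 */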
static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	/* The completion status is returned in the 16-bit nport_handle field. */
	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
	sp->done(vha, sp, 0);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
		/*
		 * If the kernel does not notify qla of the IRQ's CPU change,
		 * then set it here.
		 */
		rsp->msix->cpuid = smp_processor_id();
		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
	}

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			if (qlt_24xx_process_response_error(vha, pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, (response_t *)pkt);
				break;
			} else {
				/* drop through */
				qlt_24xx_process_atio_queue(vha, 1);
			}
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/*
			 * Do nothing in this case; this check keeps marker
			 * entries from falling into the default case.
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
	}
}

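/**
 * qla2xxx_check_risc_status() - Sanity-check RISC state after a pause.
 * @vha: SCSI driver HA context
 *
 * Probes the RISC I/O base window registers on ISP25xx and later parts;
 * if BIT_3 of the window data register is set, an additional diagnostic
 * code (0x55AA) is logged for support purposes.
 */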
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE: {
			unsigned long flags2;

			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;

			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

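/**
 * qla24xx_msix_rsp_q() - MSI-X response queue interrupt handler.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 */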
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;
	uint32_t stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	/*
	 * Use the host_status register to check for PCI disconnection
	 * before we process the response queue.
	 */
	stat = RD_REG_DWORD(&reg->host_status);
	if (qla2x00_check_reg32_for_disconnect(vha, stat))
		goto out;
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
out:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

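/**
 * qla25xx_msix_rsp_q() - MSI-X handler for an added (multiqueue) response queue.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Clears the interrupt, if enabled, and defers processing to the
 * queue's work item.
 */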
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	scsi_qla_host_t *vha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;
	uint32_t hccr = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	if (qla2x00_check_reg32_for_disconnect(vha, hccr))
		goto out;
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

out:
	return IRQ_HANDLED;
}

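/**
 * qla24xx_msix_default() - MSI-X default interrupt handler.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Handles mailbox completions, async events and response/ATIO queue
 * updates delivered on the default vector.
 */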
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE: {
			unsigned long flags2;

			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;

			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n",
			    stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};

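/**
 * qla24xx_disable_msix() - Release MSI-X vectors and disable MSI-X.
 * @ha: HA context
 */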
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq) {
			/* un-register irq cpu affinity notification */
			irq_set_affinity_notifier(qentry->vector, NULL);
			free_irq(qentry->vector, qentry->rsp);
		}
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

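/**
 * qla24xx_enable_msix() - Allocate and register MSI-X vectors.
 * @ha: HA context
 * @rsp: base response queue
 *
 * Requests up to ha->msix_count vectors (at least MIN_MSIX_COUNT),
 * registers handlers for the default and response queues and, when
 * target mode is enabled on capable ISPs, for the ATIO queue.
 */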
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
#define ATIO_VECTOR	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix_range(ha->pdev,
	    entries, MIN_MSIX_COUNT, ha->msix_count);
	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable full support "
		    "-- %d/%d. Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
		qentry->irq_notify.notify = qla_irq_affinity_notify;
		qentry->irq_notify.release = qla_irq_affinity_release;
		qentry->cpuid = -1;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		qentry->rsp = rsp;
		rsp->msix = qentry;
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;

		/* Register for CPU affinity notification. */
		irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);

		/*
		 * Schedule a work item (i.e. trigger a notification) to read
		 * the cpu mask for this specific irq.  kref_get() is required
		 * because irq_affinity_notify() will do kref_put().
		 */
		kref_get(&qentry->irq_notify.kref);
		schedule_work(&qentry->irq_notify.work);
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[ATIO_VECTOR];
		qentry->rsp = rsp;
		rsp->msix = qentry;
		ret = request_irq(qentry->vector,
		    qla83xx_msix_entries[ATIO_VECTOR].handler,
		    0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
		/* Only mark the vector registered if request_irq() succeeded. */
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla24xx_disable_msix(ha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

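/**
 * qla2x00_request_irqs() - Enable MSI-X, MSI or INTx and register handlers.
 * @ha: HA context
 * @rsp: base response queue
 *
 * Tries MSI-X first, then falls back to MSI and finally to INTa mode,
 * subject to per-ISP and per-subsystem quirks.
 */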
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}
	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:
	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- %d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
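
/*
 * qla2x00_free_irqs() - release whatever interrupt resources
 * qla2x00_request_irqs() acquired: all MSI-X vectors, a single MSI
 * vector, or the legacy INTx line.
 */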
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}
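
/*
 * qla25xx_request_irq() - hook up the MSI-X vector reserved for an
 * additional response queue. Entry 2 of the msix_entries table supplies
 * the handler and name used for the extra queue's vector.
 */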
int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}
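
/*
 * CPU affinity notification lifecycle: qla24xx_enable_msix() registers
 * qla_irq_affinity_notify()/qla_irq_affinity_release() on each base
 * vector and schedules the notify work once to learn the initial cpu
 * mask. The core's irq_affinity_notify() work handler does a kref_put()
 * after invoking the callback, which is why a kref_get() precedes each
 * schedule_work(). The irq_affinity_notify structure is embedded in
 * qla_msix_entry and freed with it, so the release callback only logs.
 */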
/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
	const cpumask_t *mask)
{
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct qla_hw_data *ha;
	struct scsi_qla_host *base_vha;

	/* Users are advised to restrict the mask to a single cpu. */
	e->cpuid = cpumask_first(mask);

	ha = e->rsp->hw;
	base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host %ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);

	if (e->have_irq &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
	    (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
		ha->tgt.rspq_vector_cpuid = e->cpuid;
		ql_dbg(ql_dbg_init, base_vha, 0xffff,
		    "%s: host %ld: rspq vector %d cpu %d runtime change\n",
		    __func__, base_vha->host_no, e->vector, e->cpuid);
	}
}
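
/* kref release callback: last reference to the notify structure dropped. */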
static void qla_irq_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host %ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);
}