port.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "port.h"
#include "request.h"

#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
#define SCU_DUMMY_INDEX (0xFFFF)

#undef C
#define C(a) (#a)
const char *port_state_name(enum sci_port_states state)
{
	static const char * const strings[] = PORT_STATES;

	return strings[state];
}
#undef C

static struct device *sciport_to_dev(struct isci_port *iport)
{
	int i = iport->physical_port_index;
	struct isci_port *table;
	struct isci_host *ihost;

	if (i == SCIC_SDS_DUMMY_PORT)
		i = SCI_MAX_PORTS+1;

	table = iport - i;
	ihost = container_of(table, typeof(*ihost), ports[0]);

	return &ihost->pdev->dev;
}

static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
{
	u8 index;

	proto->all = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		struct isci_phy *iphy = iport->phy_table[index];

		if (!iphy)
			continue;
		sci_phy_get_protocols(iphy, proto);
	}
}

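/* Return a mask with one bit set for each phy slot that is currently
 * populated in this port's phy_table.
 */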
static u32 sci_port_get_phys(struct isci_port *iport)
{
	u32 index;
	u32 mask;

	mask = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			mask |= (1 << index);

	return mask;
}

/**
 * sci_port_get_properties() - This method simply returns the properties
 *    regarding the port, such as: physical index, protocols, sas address, etc.
 * @iport: this parameter specifies the port for which to retrieve the physical
 *    index.
 * @prop: This parameter specifies the properties structure into which to
 *    copy the requested information.
 *
 * Indicate if the user specified a valid port. SCI_SUCCESS This value is
 * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
 * value is returned if the specified port is not valid. When this value is
 * returned, no data is copied to the properties output parameter.
 */
enum sci_status sci_port_get_properties(struct isci_port *iport,
					struct sci_port_properties *prop)
{
	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
		return SCI_FAILURE_INVALID_PORT;

	prop->index = iport->logical_port_index;
	prop->phy_mask = sci_port_get_phys(iport);
	sci_port_get_sas_address(iport, &prop->local.sas_address);
	sci_port_get_protocols(iport, &prop->local.protocols);
	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);

	return SCI_SUCCESS;
}

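/* Re-enable broadcast change notifications on each phy in the port; per the
 * note below, the relevant bit in link_layer_control is cleared by writing
 * the register value back.
 */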
static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* clear the bit by writing 1. */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}

static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_phy = %p, sas_phy = %p\n",
		__func__, iphy, &iphy->sas_phy);

	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
	sci_port_bcn_enable(iport);
}

static void isci_port_link_up(struct isci_host *isci_host,
			      struct isci_port *iport,
			      struct isci_phy *iphy)
{
	unsigned long flags;
	struct sci_port_properties properties;
	unsigned long success = true;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n",
		__func__, iport);

	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);

	sci_port_get_properties(iport, &properties);

	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
		u64 attached_sas_address;

		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);

		/*
		 * For direct-attached SATA devices, the SCI core will
		 * automagically assign a SAS address to the end device
		 * for the purpose of creating a port. This SAS address
		 * will not be the same as assigned to the PHY and needs
		 * to be obtained from struct sci_port_properties properties.
		 */
		attached_sas_address = properties.remote.sas_address.high;
		attached_sas_address <<= 32;
		attached_sas_address |= properties.remote.sas_address.low;
		swab64s(&attached_sas_address);

		memcpy(&iphy->sas_phy.attached_sas_addr,
		       &attached_sas_address, sizeof(attached_sas_address));
	} else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);

		/* Copy the attached SAS address from the IAF */
		memcpy(iphy->sas_phy.attached_sas_addr,
		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
	} else {
		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
		success = false;
	}

	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);

	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);

	/* Notify libsas that we have an address frame, if indeed
	 * we've found an SSP, SMP, or STP target */
	if (success)
		isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
						    PORTE_BYTES_DMAED);
}

/**
 * isci_port_link_down() - This function is called by the sci core when a link
 *    becomes inactive.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_phy: This parameter specifies the isci phy with the active link.
 * @isci_port: This parameter specifies the isci port with the active link.
 *
 */
static void isci_port_link_down(struct isci_host *isci_host,
				struct isci_phy *isci_phy,
				struct isci_port *isci_port)
{
	struct isci_remote_device *isci_device;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	if (isci_port) {
		/* check to see if this is the last phy on this port. */
		if (isci_phy->sas_phy.port &&
		    isci_phy->sas_phy.port->num_phys == 1) {
			/* change the state for all devices on this port. The
			 * next task sent to this device will be returned as
			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
			 * remove the target
			 */
			list_for_each_entry(isci_device,
					    &isci_port->remote_dev_list,
					    node) {
				dev_dbg(&isci_host->pdev->dev,
					"%s: isci_device = %p\n",
					__func__, isci_device);
				set_bit(IDEV_GONE, &isci_device->flags);
			}
		}
	}

	/* Notify libsas of the broken link, this will trigger calls to our
	 * isci_port_deformed and isci_dev_gone functions.
	 */
	sas_phy_disconnected(&isci_phy->sas_phy);
	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
					   PHYE_LOSS_OF_SIGNAL);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p - Done\n", __func__, isci_port);
}

static bool is_port_ready_state(enum sci_port_states state)
{
	switch (state) {
	case SCI_PORT_READY:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
		return true;
	default:
		return false;
	}
}

/* flag dummy rnc handling when exiting a ready state */
static void port_state_machine_change(struct isci_port *iport,
				      enum sci_port_states state)
{
	struct sci_base_state_machine *sm = &iport->sm;
	enum sci_port_states old_state = sm->current_state_id;

	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
		iport->ready_exit = true;

	sci_change_state(sm, state);
	iport->ready_exit = false;
}

/**
 * isci_port_hard_reset_complete() - This function is called by the sci core
 *    when the hard reset complete notification has been received.
 * @isci_port: This parameter specifies the sci port with the active link.
 * @completion_status: This parameter specifies the core status for the reset
 *    process.
 *
 */
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
					  enum sci_status completion_status)
{
	struct isci_host *ihost = isci_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_port = %p, completion_status=%x\n",
		__func__, isci_port, completion_status);

	/* Save the status of the hard reset from the port. */
	isci_port->hard_reset_status = completion_status;

	if (completion_status != SCI_SUCCESS) {

		/* The reset failed. The port state is now SCI_PORT_FAILED. */
		if (isci_port->active_phy_mask == 0) {
			int phy_idx = isci_port->last_active_phy;
			struct isci_phy *iphy = &ihost->phys[phy_idx];

			/* Generate the link down now to the host, since it
			 * was intercepted by the hard reset state machine when
			 * it really happened.
			 */
			isci_port_link_down(ihost, iphy, isci_port);
		}
		/* Advance the port state so that link state changes will be
		 * noticed.
		 */
		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
	}
	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
	wake_up(&ihost->eventq);
}

/* This method will return a true value if the specified phy can be assigned
 * to this port.  The following is a list of phys for each port that are
 * allowed:
 * - Port 0 - 3 2 1 0
 * - Port 1 -     1
 * - Port 2 - 3 2
 * - Port 3 - 3
 * This method doesn't preclude all configurations. It merely ensures that a
 * phy is part of the allowable set of phy identifiers for that port. For
 * example, one could assign phy 3 to port 0 and no other phys. Please refer
 * to sci_port_is_phy_mask_valid() for information regarding whether the
 * phy_mask for a port can be supported.
 *
 * Returns true if this is a valid phy assignment for the port; false if this
 * is not a valid phy assignment for the port.
 */
bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
{
	struct isci_host *ihost = iport->owning_controller;
	struct sci_user_parameters *user = &ihost->user_parameters;

	/* Initialize to invalid value. */
	u32 existing_phy_index = SCI_MAX_PHYS;
	u32 index;

	if ((iport->physical_port_index == 1) && (phy_index != 1))
		return false;

	if (iport->physical_port_index == 3 && phy_index != 3)
		return false;

	if (iport->physical_port_index == 2 &&
	    (phy_index == 0 || phy_index == 1))
		return false;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index] && index != phy_index)
			existing_phy_index = index;

	/* Ensure that all of the phys in the port are capable of
	 * operating at the same maximum link rate.
	 */
	if (existing_phy_index < SCI_MAX_PHYS &&
	    user->phys[phy_index].max_speed_generation !=
	    user->phys[existing_phy_index].max_speed_generation)
		return false;

	return true;
}

/**
 * sci_port_is_phy_mask_valid() - determine whether the port's phy mask can be
 *    supported by the SCU.
 * @iport: This is the port object for which to determine if the phy mask
 *    can be supported.
 * @phy_mask: the phy mask to validate.
 *
 * The following is the list of valid PHY mask configurations for each port:
 * - Port 0 - [[3 2] 1] 0
 * - Port 1 -        [1]
 * - Port 2 -   [[3] 2]
 * - Port 3 -    [3]
 *
 * Returns true if this is a valid phy mask for the port; false if it is not.
 */
static bool sci_port_is_phy_mask_valid(
	struct isci_port *iport,
	u32 phy_mask)
{
	if (iport->physical_port_index == 0) {
		if (((phy_mask & 0x0F) == 0x0F)
		    || ((phy_mask & 0x03) == 0x03)
		    || ((phy_mask & 0x01) == 0x01)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 1) {
		if (((phy_mask & 0x02) == 0x02)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 2) {
		if (((phy_mask & 0x0C) == 0x0C)
		    || ((phy_mask & 0x04) == 0x04)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 3) {
		if (((phy_mask & 0x08) == 0x08)
		    || (phy_mask == 0))
			return true;
	}

	return false;
}

/*
 * This method retrieves a currently active (i.e. connected) phy contained in
 * the port. Currently, the lowest order phy that is connected is returned.
 * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is
 * returned if there are no currently active (i.e. connected to a remote end
 * point) phys contained in the port. All other values specify a struct sci_phy
 * object that is active in the port.
 */
static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
{
	u32 index;
	struct isci_phy *iphy;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Ensure that the phy is both part of the port and currently
		 * connected to the remote end-point.
		 */
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy))
			return iphy;
	}

	return NULL;
}

static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Check to see if we can add this phy to a port
	 * that means that the phy is not part of a port and that the port does
	 * not already have a phy assigned to the phy index.
	 */
	if (!iport->phy_table[iphy->phy_index] &&
	    !phy_get_non_dummy_port(iphy) &&
	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
		/* Phy is being added in the stopped state so we are in MPC mode
		 * make logical port index = physical port index
		 */
		iport->logical_port_index = iport->physical_port_index;
		iport->phy_table[iphy->phy_index] = iphy;
		sci_phy_set_port(iphy, iport);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Make sure that this phy is part of this port */
	if (iport->phy_table[iphy->phy_index] == iphy &&
	    phy_get_non_dummy_port(iphy) == iport) {
		struct isci_host *ihost = iport->owning_controller;

		/* Yep it is assigned to this port so remove it */
		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
		iport->phy_table[iphy->phy_index] = NULL;
		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

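/* Report the port's local SAS address, taken from the phys assigned to the
 * port (all phys in a port share the same local SAS address).
 */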
void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	u32 index;

	sas->high = 0;
	sas->low = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			sci_phy_get_sas_address(iport->phy_table[index], sas);
}

void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	struct isci_phy *iphy;

	/*
	 * Ensure that the phy is both part of the port and currently
	 * connected to the remote end-point.
	 */
	iphy = sci_port_get_a_connected_phy(iport);
	if (iphy) {
		if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
			sci_phy_get_attached_sas_address(iphy, sas);
		} else {
			sci_phy_get_sas_address(iphy, sas);
			sas->low += iphy->phy_index;
		}
	} else {
		sas->high = 0;
		sas->low = 0;
	}
}

/**
 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
 *
 * @iport: logical port on which we need to create the remote node context
 * @rni: remote node index for this remote node context.
 *
 * This routine will construct a dummy remote node context data structure.
 * This structure will be posted to the hardware to work around a scheduler
 * error in the hardware.
 */
static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
	union scu_remote_node_context *rnc;

	rnc = &iport->owning_controller->remote_node_context_table[rni];

	memset(rnc, 0, sizeof(union scu_remote_node_context));

	rnc->ssp.remote_sas_address_hi = 0;
	rnc->ssp.remote_sas_address_lo = 0;

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = iport->physical_port_index;

	rnc->ssp.nexus_loss_timer_enable = false;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;
}

/*
 * construct a dummy task context data structure.  This
 * structure will be posted to the hardware to work around a scheduler error
 * in the hardware.
 */
static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
	struct isci_host *ihost = iport->owning_controller;
	struct scu_task_context *task_context;

	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	memset(task_context, 0, sizeof(struct scu_task_context));

	task_context->initiator_request = 1;
	task_context->connection_rate = 1;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->task_index = ISCI_TAG_TCI(tag);
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index = iport->reserved_rni;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->task_phase = 0x01;
}

static void sci_port_destroy_dummy_resources(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		isci_free_tag(ihost, iport->reserved_tag);

	if (iport->reserved_rni != SCU_DUMMY_INDEX)
		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
								1, iport->reserved_rni);

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
}

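/* Program the given device id into the transport layer of every phy that is
 * currently active in this port.
 */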
void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
	u8 index;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->active_phy_mask & (1 << index))
			sci_phy_setup_transport(iport->phy_table[index], device_id);
	}
}

static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	sci_phy_resume(iphy);
	iport->enabled_phy_mask |= 1 << iphy->phy_index;
}

static void sci_port_activate_phy(struct isci_port *iport,
				  struct isci_phy *iphy,
				  u8 flags)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	sci_controller_clear_invalid_phy(ihost, iphy);

	if (flags & PF_NOTIFY)
		isci_port_link_up(ihost, iport, iphy);
}

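/* Take the phy out of the port's active and enabled masks.  In automatic
 * (APC) port configuration mode the phy is also handed back to its own
 * port-engine mapping; when requested, libsas is notified of the link down.
 */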
void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	iport->active_phy_mask &= ~(1 << iphy->phy_index);
	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
	if (!iport->active_phy_mask)
		iport->last_active_phy = iphy->phy_index;

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* Re-assign the phy back to the LP as if it were a narrow port for APC
	 * mode. For MPC mode, the phy will remain in the port.
	 */
	if (iport->owning_controller->oem_parameters.controller.mode_type ==
	    SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
		writel(iphy->phy_index,
		       &iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user == true)
		isci_port_link_down(ihost, iphy, iport);
}

static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Check to see if we have already reported this link as bad and if
	 * not go ahead and tell the SCI_USER that we have discovered an
	 * invalid link.
	 */
	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
	}
}

/**
 * sci_port_general_link_up_handler - phy can be assigned to port?
 * @iport: sci_port object which has a phy that has gone link up.
 * @iphy: This is the struct isci_phy object that has gone link up.
 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
 *
 * Determine if this phy can be assigned to this port.  If the phy is
 * not a valid PHY for this port then the function will notify the user.
 * A PHY can only be part of a port if its attached SAS address is the
 * same as all other PHYs in the same port.
 */
static void sci_port_general_link_up_handler(struct isci_port *iport,
					     struct isci_phy *iphy,
					     u8 flags)
{
	struct sci_sas_address port_sas_address;
	struct sci_sas_address phy_sas_address;

	sci_port_get_attached_sas_address(iport, &port_sas_address);
	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);

	/* If the SAS address of the new phy matches the SAS address of
	 * other phys in the port OR this is the first phy in the port,
	 * then activate the phy and allow it to be used for operations
	 * in this port.
	 */
	if ((phy_sas_address.high == port_sas_address.high &&
	     phy_sas_address.low == port_sas_address.low) ||
	    iport->active_phy_mask == 0) {
		struct sci_base_state_machine *sm = &iport->sm;

		sci_port_activate_phy(iport, iphy, flags);
		if (sm->current_state_id == SCI_PORT_RESETTING)
			port_state_machine_change(iport, SCI_PORT_READY);
	} else
		sci_port_invalid_link_up(iport, iphy);
}

/**
 * sci_port_is_wide() - determine whether the port is a wide port.
 * @iport: The port for which the wide port condition is to be checked.
 *
 * This method returns false if the port has only a single phy object
 * assigned.  If there are no phys, or more than one phy, it returns true.
 */
static bool sci_port_is_wide(struct isci_port *iport)
{
	u32 index;
	u32 phy_count = 0;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index] != NULL) {
			phy_count++;
		}
	}

	return phy_count != 1;
}

/**
 * sci_port_link_detected() - called by the PHY object when a link is detected.
 * @iport: The port associated with the phy object.
 * @iphy: The phy object that is trying to go link up.
 *
 * If the port wants the PHY to continue on to the link up state then the port
 * layer must return true.  If the port object returns false the phy object
 * must halt its attempt to go link up.
 *
 * Returns true if the phy can continue on to the link up (ready) state,
 * false if it can not.  This notification is in place for wide ports and
 * direct attached phys.  Since there are no wide ported SATA devices this
 * could become an invalid port configuration.
 */
bool sci_port_link_detected(
	struct isci_port *iport,
	struct isci_phy *iphy)
{
	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
		if (sci_port_is_wide(iport)) {
			sci_port_invalid_link_up(iport, iphy);
			return false;
		} else {
			struct isci_host *ihost = iport->owning_controller;
			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);

			writel(iphy->phy_index,
			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
		}
	}

	return true;
}

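/* Timer callback armed for port reset and stop operations.  If the timer
 * fires while the port is still resetting, the port is failed; in the other
 * states the timeout is simply reported.
 */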
static void port_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
	struct isci_host *ihost = iport->owning_controller;
	unsigned long flags;
	u32 current_state;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	current_state = iport->sm.current_state_id;

	if (current_state == SCI_PORT_RESETTING) {
		/* if the port is still in the resetting state then the timeout
		 * fired before the reset completed.
		 */
		port_state_machine_change(iport, SCI_PORT_FAILED);
	} else if (current_state == SCI_PORT_STOPPED) {
		/* if the port is stopped then the start request failed.  In
		 * this case stay in the stopped state.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
			__func__,
			iport);
	} else if (current_state == SCI_PORT_STOPPING) {
		dev_dbg(sciport_to_dev(iport),
			"%s: port%d: stop complete timeout\n",
			__func__, iport->physical_port_index);
	} else {
		/* The port is in the ready state and we have a timer
		 * reporting a timeout this should not happen.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p is processing a timeout operation "
			"in state %d.\n", __func__, iport, current_state);
	}

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/* --------------------------------------------------------------------------- */

/**
 * sci_port_update_viit_entry() - update the hardware's VIIT entry for this
 *    port.
 */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
	       &iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
	       &iport->viit_registers->initiator_sas_address_lo);

	/* This value gets cleared just in case it's not already cleared */
	writel(0, &iport->viit_registers->reserved);

	/* We are required to update the status register last */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}

enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
	u16 index;
	struct isci_phy *iphy;
	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;

	/*
	 * Loop through all of the phys in this port and find the phy with the
	 * lowest maximum link rate.
	 */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy) &&
		    iphy->max_negotiated_speed < max_allowed_speed)
			max_allowed_speed = iphy->max_negotiated_speed;
	}

	return max_allowed_speed;
}

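/* Set the suspend bit in the port task scheduler control register so the
 * hardware stops dispatching new work for this port.
 */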
static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

/**
 * sci_port_post_dummy_request() - post dummy/workaround request
 * @iport: port to post task
 *
 * Prevent the hardware scheduler from posting new requests to the front
 * of the scheduler queue causing a starvation problem for currently
 * ongoing requests.
 */
static void sci_port_post_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 0;

	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_abort_dummy_request() - abort the dummy request.  This allows the
 *    hardware to power down parts of the silicon to save power.
 * @iport: The port on which the task must be aborted.
 */
static void sci_port_abort_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 1;

	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_resume_port_task_scheduler() - resume the port task scheduler for
 *    this port object.
 * @iport: This is the struct isci_port object to resume.
 */
static void
sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;

	if (iport->active_phy_mask != 0) {
		/* At least one of the phys on the port is ready */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
	}
}

static void scic_sds_port_ready_substate_waiting_exit(
	struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	sci_port_resume_port_task_scheduler(iport);
}

static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
		__func__, iport->physical_port_index);

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
			       &iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
			if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
				sci_port_resume_phy(iport, iport->phy_table[index]);
		}
	}

	sci_port_update_viit_entry(iport);

	/*
	 * Post the dummy task for the port so the hardware can schedule
	 * io correctly
	 */
	sci_port_post_dummy_request(iport);
}

static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* ensure the preceding tc abort request has reached the
	 * controller and give it ample time to act before posting the rnc
	 * invalidate
	 */
	readl(&ihost->smu_registers->interrupt_status); /* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_ready_substate_operational_exit() - perform the actions required
 *    by the struct isci_port on exiting SCI_PORT_SUB_OPERATIONAL.  This
 *    function reports the port not ready and suspends the port task
 *    scheduler.
 * @sm: This is the state machine which is cast to a struct isci_port object.
 */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Kill the dummy task for this port if it has not yet posted
	 * the hardware will treat this as a NOP and just return abort
	 * complete.
	 */
	sci_port_abort_dummy_request(iport);

	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
		__func__, iport->physical_port_index);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}

static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	if (iport->active_phy_mask == 0) {
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
	} else
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}

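/* sci_port_start() - take a stopped port to the ready state.  Reserves the
 * dummy remote node index and I/O tag used by the silicon workaround,
 * validates the port's phy mask, and then transitions the state machine to
 * SCI_PORT_READY.  On failure the reserved dummy resources are released.
 */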
enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* TODO This is a start failure operation because
		 * there are still devices assigned to this port.
		 * There must be no devices assigned to a port on a
		 * start operation.
		 */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/*
		 * There are one or more phys assigned to this port. Make sure
		 * the port's phy mask is in fact legal and supported by the
		 * silicon.
		 */
		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
			port_state_machine_change(iport,
						  SCI_PORT_READY);

			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}

enum sci_status sci_port_stop(struct isci_port *iport)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		return SCI_SUCCESS;
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
	case SCI_PORT_RESETTING:
		port_state_machine_change(iport,
					  SCI_PORT_STOPPING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Select a phy on which we can send the hard reset request. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/*
			 * We found a phy but it is not ready; select a
			 * different phy
			 */
			iphy = NULL;
		}
	}

	/* If we have a phy then go ahead and start the reset procedure */
	if (!iphy)
		return status;

	status = sci_phy_reset(iphy);
	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}

/**
 * sci_port_add_phy() -
 * @iport: This parameter specifies the port in which the phy will be added.
 * @iphy: This parameter is the phy which is to be added to the port.
 *
 * This method will add a PHY to the selected port. This method returns an
 * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
 * status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * sci_port_remove_phy() -
 * @iport: This parameter specifies the port from which the phy will be
 *    removed.
 * @iphy: This parameter is the phy which is to be removed from the port.
 *
 * This method will remove the PHY from the selected PORT. This method returns
 * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
 * other status is a failure to remove the phy from the port.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
				    struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_up(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* Since this is the first phy going link up for the port we
		 * can just enable it and continue
		 */
		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* TODO We should make sure that the phy that has gone
		 * link up is the same one on which we sent the reset. It is
		 * possible that the phy on which we sent the reset is not the
		 * one that has gone link up and we want to make sure that
		 * phy being reset comes back. Consider the case where a
		 * reset is sent but before the hardware processes the reset it
		 * gets a link up on the port because of a hot plug event.
		 * Because of the reset request this phy will go link down
		 * almost immediately.
		 */

		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_down(struct isci_port *iport,
				   struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_deactivate_phy(iport, iphy, true);

		/* If there are no active phys left in the port, then
		 * transition the port to the WAITING state until such time
		 * as a phy goes link up
		 */
		if (iport->active_phy_mask == 0)
			port_state_machine_change(iport,
						  SCI_PORT_SUB_WAITING);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_deactivate_phy(iport, iphy, false);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

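/* Account for a new I/O started on this port; only permitted while the port
 * is in the operational substate.
 */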
enum sci_status sci_port_start_io(struct isci_port *iport,
				  struct isci_remote_device *idev,
				  struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_SUB_OPERATIONAL:
		iport->started_request_count++;
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
		sci_port_decrement_request_count(iport);

		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	}
	return SCI_SUCCESS;
}

static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	/* enable the port task scheduler in a suspended state */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
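
/**
 * sci_port_post_dummy_remote_node() - post and suspend the reserved dummy
 *    remote node context for this port
 * @iport: the port whose dummy remote node context should be posted
 *
 * The dummy RNC is marked valid and posted to the hardware, then suspended
 * after a short delay so the controller has time to process the post
 * before the suspend request is issued.
 */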
static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* ensure hardware has seen the post rnc command and give it
	 * ample time to act before sending the suspend
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}
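
/*
 * The stopped state handlers below gate the hardware port task scheduler:
 * it is disabled when the port finishes stopping and re-enabled (in the
 * suspended state) when the port leaves the stopped state.
 */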
static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
		/*
		 * If we enter this state because of a request to stop
		 * the port then we want to disable the hardware's port
		 * task scheduler.
		 */
		sci_port_disable_port_task_scheduler(iport);
	}
}
static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* Enable and suspend the port task scheduler */
	sci_port_enable_port_task_scheduler(iport);
}
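
/**
 * sci_port_ready_state_enter() - actions taken when the port becomes ready
 * @sm: the port state machine
 *
 * Completes an outstanding hard reset if one was in progress, posts the
 * dummy remote node context for the port, and starts the ready substate
 * machine in the waiting substate.
 */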
static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;
	u32 prev_state;

	prev_state = iport->sm.previous_state_id;
	if (prev_state == SCI_PORT_RESETTING)
		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
	else
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

	/* Post and suspend the dummy remote node context for this port. */
	sci_port_post_dummy_remote_node(iport);

	/* Start the ready substate machine */
	port_state_machine_change(iport,
				  SCI_PORT_SUB_WAITING);
}
static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);
}
static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);

	sci_port_destroy_dummy_resources(iport);
}
static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}
/* --------------------------------------------------------------------------- */
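
/*
 * Map each port state to its enter/exit handlers.  States without an
 * entry for a given handler have no work to do on that transition.
 */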
static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};
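
/**
 * sci_port_construct() - one-time construction of a port object
 * @iport: the port to construct
 * @index: the physical port index assigned to this port
 * @ihost: the controller that owns this port
 *
 * Initializes the port state machine in the stopped state and resets all
 * bookkeeping (phy masks, request counts, reserved dummy resources, timer
 * and phy table) to their idle values.
 */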
void sci_port_construct(struct isci_port *iport, u8 index,
			struct isci_host *ihost)
{
	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask = 0;
	iport->enabled_phy_mask = 0;
	iport->last_active_phy = 0;
	iport->ready_exit = false;

	iport->owning_controller = ihost;
	iport->started_request_count = 0;
	iport->assigned_device_count = 0;

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		iport->phy_table[index] = NULL;
}
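
/* Initialize the driver-level (isci_*) portion of the port object: the
 * remote/domain device lists and the back pointer to the host.
 */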
void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
{
	INIT_LIST_HEAD(&iport->remote_dev_list);
	INIT_LIST_HEAD(&iport->domain_dev_list);
	iport->isci_host = ihost;
}
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/* notify the user. */
	isci_port_bc_change_received(ihost, iport, iphy);
}
static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}
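
/**
 * isci_port_perform_hard_reset() - issue a hard reset on a port and wait
 *    for it to complete
 * @ihost: the host that owns the port
 * @iport: the port to reset
 * @iphy: the phy through which the reset was requested
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, or TMF_RESP_FUNC_FAILED if the
 * reset could not be started or did not complete successfully.
 */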
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IPORT_RESET_PENDING, &iport->state);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_port_reset(ihost, iport);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;

			dev_err(&ihost->pdev->dev,
				"%s: iport = %p; hard reset failed (0x%x)\n",
				__func__, iport, iport->hard_reset_status);
		}
	} else {
		clear_bit(IPORT_RESET_PENDING, &iport->state);
		wake_up(&ihost->eventq);
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call failed 0x%x\n",
			__func__, iport, status);
	}

	/* If the hard reset for the port has failed, consider this
	 * the same as link failures on all phys in the port.
	 */
	if (ret != TMF_RESP_FUNC_COMPLETE) {
		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; hard reset failed "
			"(0x%x) - driving explicit link fail for all phys\n",
			__func__, iport, iport->hard_reset_status);
	}
	return ret;
}
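
/**
 * isci_ata_check_ready() - check whether the port behind an ATA domain
 *    device is ready for I/O after a reset
 * @dev: the domain device being checked
 *
 * Returns 1 when the device is known to the driver, no port reset is
 * pending, and the port still has active phys; 0 otherwise.
 */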
int isci_ata_check_ready(struct domain_device *dev)
{
	struct isci_port *iport = dev->port->lldd_port;
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		goto out;

	if (test_bit(IPORT_RESET_PENDING, &iport->state))
		goto out;

	rc = !!iport->active_phy_mask;
 out:
	isci_put_device(idev);
	return rc;
}
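
/**
 * isci_port_deformed() - handle the deformation of the port that owns @phy
 * @phy: the phy whose port is being deformed
 *
 * Nothing is torn down here; the routine only emits a debug message once
 * no active phys remain in the port, and tolerates notifications for ports
 * the driver has already torn down.
 */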
void isci_port_deformed(struct asd_sas_phy *phy)
{
	struct isci_host *ihost = phy->ha->lldd_ha;
	struct isci_port *iport = phy->port->lldd_port;
	unsigned long flags;
	int i;

	/* we got a port notification on a port that was subsequently
	 * torn down and libsas is just now catching up
	 */
	if (!iport)
		return;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		if (iport->active_phy_mask & 1 << i)
			break;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (i >= SCI_MAX_PHYS)
		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
			__func__, (long)(iport - &ihost->ports[0]));
}
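
/**
 * isci_port_formed() - associate a newly formed sas port with its isci_port
 * @phy: the phy that triggered port formation
 *
 * Waits for driver start to complete, then finds the isci_port whose active
 * phy mask contains this phy and records it in port->lldd_port (or NULL
 * when no matching port is found).
 */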
void isci_port_formed(struct asd_sas_phy *phy)
{
	struct isci_host *ihost = phy->ha->lldd_ha;
	struct isci_phy *iphy = to_iphy(phy);
	struct asd_sas_port *port = phy->port;
	struct isci_port *iport;
	unsigned long flags;
	int i;

	/* initial ports are formed as the driver is still initializing,
	 * wait for that process to complete
	 */
	wait_for_start(ihost);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		iport = &ihost->ports[i];
		if (iport->active_phy_mask & 1 << iphy->phy_index)
			break;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (i >= SCI_MAX_PORTS)
		iport = NULL;

	port->lldd_port = iport;
}