/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
};

struct imx6_pcie {
	struct pcie_port	pp;	/* pp.dbi_base is DT 0th resource */
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;
	enum imx6_pcie_variants variant;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR				(PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0			(PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1			(PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

#define PCIE_PHY_CTRL				(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC			0
#define PCIE_PHY_CTRL_CAP_ADR_LOC		16
#define PCIE_PHY_CTRL_CAP_DAT_LOC		17
#define PCIE_PHY_CTRL_WR_LOC			18
#define PCIE_PHY_CTRL_RD_LOC			19

#define PCIE_PHY_STAT				(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC			16

#define PCIE_LINK_WIDTH_SPEED_CONTROL		0x80C
#define PORT_LOGIC_SPEED_CHANGE			(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT			0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID		(1 << 0)

#define PHY_RX_OVRD_IN_LO			0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		(1 << 3)

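/*
 * The PHY registers above are not memory-mapped; they are reached indirectly
 * through the PCIE_PHY_CTRL/PCIE_PHY_STAT pair.  The helpers below latch an
 * address or data word with the cap_adr/cap_dat bits, pulse the rd/wr bit,
 * and poll the ack bit in PCIE_PHY_STAT after every step of the handshake.
 */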
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0);

	return 0;
}

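/*
 * Reset the PHY receiver by briefly asserting the RX data and RX PLL enable
 * overrides in PHY_RX_OVRD_IN_LO, then releasing them.  Used to bring the
 * PHY back to a known state after a failed link-up attempt.
 */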
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u32 tmp;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

/*
 * Added for PCI abort handling: registered via hook_fault_code() in probe so
 * that imprecise external aborts raised by config accesses to absent devices
 * are reported as handled (return 0) instead of crashing the kernel.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}

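/*
 * Put the PCIe core into reset.  The sequence is variant specific: i.MX6SX
 * and i.MX6QP have dedicated GPR reset bits, while plain i.MX6Q has to force
 * the LTSSM back to "detect" if the bootloader already brought the link up,
 * then power down the PHY and gate the reference clock.
 */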
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	u32 val, gpr1, gpr12;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/*
		 * If the bootloader already enabled the link we need some
		 * special handling to get the core back into a state where
		 * it is safe to touch it for configuration.  As there is
		 * no dedicated reset signal wired up for MX6QDL, we need
		 * to manually force LTSSM into "detect" state before
		 * completely disabling LTSSM, which is a prerequisite for
		 * core configuration.
		 *
		 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
		 * have a strong indication that the bootloader activated
		 * the link.
		 */
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);

		if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
		    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
			val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
			val |= PCIE_PL_PFLR_FORCE_LINK;
			dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);

			regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
					   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}
}

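/*
 * Enable the variant-specific PCIe reference clocking: the extra inbound AXI
 * clock plus TEST_POWERDOWN release on i.MX6SX, or PHY power-up and
 * REF_CLK_EN on i.MX6Q/QP.
 */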
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	struct device *dev = pp->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to synchronize
		 * internally.  If the ref clock only comes up after reset
		 * is released, the internally synced reset time is too
		 * short to meet the requirement, so add a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	}

	return ret;
}

static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	struct device *dev = pp->dev;
	int ret;

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		return;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
}

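/*
 * Program the IOMUXC GPR bits that feed static configuration into the PCIe
 * controller and PHY: RX equalization (i.MX6SX only), Root Complex device
 * type, LOS level, and the TX de-emphasis/swing values taken from the
 * device tree.
 */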
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	if (imx6_pcie->variant == IMX6SX)
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
			   imx6_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			   imx6_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			   imx6_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_FULL,
			   imx6_pcie->tx_swing_full << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_LOW,
			   imx6_pcie->tx_swing_low << 25);
}

static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	struct device *dev = pp->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pp))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	struct device *dev = pp->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
	struct imx6_pcie *imx6_pcie = arg;
	struct pcie_port *pp = &imx6_pcie->pp;

	return dw_handle_msi_irq(pp);
}

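/*
 * Bring up the link: force Gen1 for initial training, enable the LTSSM via
 * GPR12, wait for the link, then (if "fsl,max-link-speed" allows Gen2)
 * request a directed speed change and wait for training to settle.  On
 * failure the PHY receiver is reset so a later attempt starts cleanly.
 */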
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	struct device *dev = pp->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret) {
		dev_info(dev, "Link never came up\n");
		goto err_reset_phy;
	}

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/*
	 * Start Directed Speed Change so the best possible speed both link
	 * partners support can be negotiated.
	 */
	tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
	tmp |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

	ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
	if (ret) {
		dev_err(dev, "Failed to bring link up!\n");
		goto err_reset_phy;
	}

	/* Make sure link training is finished as well! */
	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret) {
		dev_err(dev, "Failed to bring link up!\n");
		goto err_reset_phy;
	}

	tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

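/*
 * pcie_host_ops callback invoked by the DesignWare core during host
 * initialization: reset the core, program the PHY, release the reset, set up
 * the Root Complex and train the link, then enable MSI if configured.
 */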
static void imx6_pcie_host_init(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}

static int imx6_pcie_link_up(struct pcie_port *pp)
{
	return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) &
			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
	.link_up = imx6_pcie_link_up,
	.host_init = imx6_pcie_host_init,
};

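/*
 * Request the MSI interrupt (when CONFIG_PCI_MSI is enabled) and register
 * the port with the DesignWare host core.
 */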
static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
				     struct platform_device *pdev)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	struct device *dev = pp->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(dev, pp->msi_irq,
				       imx6_pcie_msi_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       "mx6-pcie-msi", imx6_pcie);
		if (ret) {
			dev_err(dev, "failed to request MSI irq\n");
			return ret;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static int __init imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx6_pcie *imx6_pcie;
	struct pcie_port *pp;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pp = &imx6_pcie->pp;
	pp->dev = dev;

	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	/* Added for PCI abort handling */
	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
			"imprecise external abort");

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	if (imx6_pcie->variant == IMX6SX) {
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev,
				"pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, imx6_pcie);
	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
	},
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
device_initcall(imx6_pcie_init);