i2c-qup.c

  1. /* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. /*
  14. * QUP driver for Qualcomm MSM platforms
  15. *
  16. */
  17. /* #define DEBUG */
  18. #include <linux/module.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/init.h>
  22. #include <linux/i2c.h>
  23. #include <linux/i2c/i2c-qup.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/delay.h>
  27. #include <linux/io.h>
  28. #include <linux/mutex.h>
  29. #include <linux/timer.h>
  30. #include <linux/slab.h>
  32. #include <linux/pm_runtime.h>
  33. #include <linux/gpio.h>
  34. #include <linux/of.h>
  35. #include <linux/of_i2c.h>
  36. #include <linux/of_gpio.h>
  37. #include <mach/board.h>
  38. #include <mach/gpiomux.h>
  39. #include <mach/msm_bus_board.h>
  40. MODULE_LICENSE("GPL v2");
  41. MODULE_VERSION("0.2");
  42. MODULE_ALIAS("platform:i2c_qup");
  43. /* QUP Registers */
  44. enum {
  45. QUP_CONFIG = 0x0,
  46. QUP_STATE = 0x4,
  47. QUP_IO_MODE = 0x8,
  48. QUP_SW_RESET = 0xC,
  49. QUP_OPERATIONAL = 0x18,
  50. QUP_ERROR_FLAGS = 0x1C,
  51. QUP_ERROR_FLAGS_EN = 0x20,
  52. QUP_MX_READ_CNT = 0x208,
  53. QUP_MX_INPUT_CNT = 0x200,
  54. QUP_MX_WR_CNT = 0x100,
  55. QUP_OUT_DEBUG = 0x108,
  56. QUP_OUT_FIFO_CNT = 0x10C,
  57. QUP_OUT_FIFO_BASE = 0x110,
  58. QUP_IN_READ_CUR = 0x20C,
  59. QUP_IN_DEBUG = 0x210,
  60. QUP_IN_FIFO_CNT = 0x214,
  61. QUP_IN_FIFO_BASE = 0x218,
  62. QUP_I2C_CLK_CTL = 0x400,
  63. QUP_I2C_STATUS = 0x404,
  64. };
  65. /* QUP States and reset values */
  66. enum {
  67. QUP_RESET_STATE = 0,
  68. QUP_RUN_STATE = 1U,
  69. QUP_STATE_MASK = 3U,
  70. QUP_PAUSE_STATE = 3U,
  71. QUP_STATE_VALID = 1U << 2,
  72. QUP_I2C_MAST_GEN = 1U << 4,
  73. QUP_OPERATIONAL_RESET = 0xFF0,
  74. QUP_I2C_STATUS_RESET = 0xFFFFFC,
  75. };
  76. /* QUP OPERATIONAL FLAGS */
  77. enum {
  78. QUP_OUT_SVC_FLAG = 1U << 8,
  79. QUP_IN_SVC_FLAG = 1U << 9,
  80. QUP_MX_INPUT_DONE = 1U << 11,
  81. };
  82. /* QUP_CONFIG values and flags */
  83. enum {
  84. I2C_MINI_CORE = 2U << 8,
  85. I2C_N_VAL = 0xF,
  86. I2C_CORE_CLK_ON_EN = BIT(13),
  87. };
  88. /* Packing/Unpacking words in FIFOs, and IO modes */
  89. enum {
  90. QUP_WR_BLK_MODE = 1U << 10,
  91. QUP_RD_BLK_MODE = 1U << 12,
  92. QUP_UNPACK_EN = 1U << 14,
  93. QUP_PACK_EN = 1U << 15,
  94. };
  95. /* QUP tags */
  96. enum {
  97. QUP_OUT_NOP = 0,
  98. QUP_OUT_START = 1U << 8,
  99. QUP_OUT_DATA = 2U << 8,
  100. QUP_OUT_STOP = 3U << 8,
  101. QUP_OUT_REC = 4U << 8,
  102. QUP_IN_DATA = 5U << 8,
  103. QUP_IN_STOP = 6U << 8,
  104. QUP_IN_NACK = 7U << 8,
  105. };
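/*
 * Example (illustrative, derived from qup_issue_read()/qup_issue_write() below,
 * not an original comment): with QUP_PACK_EN/QUP_UNPACK_EN set, each 32-bit FIFO
 * word holds two 16-bit entries; the tag sits in bits [15:8] and the data byte
 * (or byte count for QUP_OUT_REC) in bits [7:0]. E.g. the start entry for a
 * write to slave address 0x40 is QUP_OUT_START | (0x40 << 1) = 0x180.
 */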
  106. /* Status, Error flags */
  107. enum {
  108. I2C_STATUS_WR_BUFFER_FULL = 1U << 0,
  109. I2C_STATUS_BUS_ACTIVE = 1U << 8,
  110. I2C_STATUS_BUS_MASTER = 1U << 9,
  111. I2C_STATUS_ERROR_MASK = 0x38000FC,
  112. QUP_I2C_NACK_FLAG = 1U << 3,
  113. QUP_IN_NOT_EMPTY = 1U << 5,
  114. QUP_STATUS_ERROR_FLAGS = 0x7C,
  115. };
  116. /* Master status clock states */
  117. enum {
  118. I2C_CLK_RESET_BUSIDLE_STATE = 0,
  119. I2C_CLK_FORCED_LOW_STATE = 5,
  120. };
  121. enum msm_i2c_state {
  122. MSM_I2C_PM_ACTIVE,
  123. MSM_I2C_PM_SUSPENDED,
  124. MSM_I2C_SYS_SUSPENDING,
  125. MSM_I2C_SYS_SUSPENDED,
  126. };
  127. #define QUP_MAX_CLK_STATE_RETRIES 300
  128. #define DEFAULT_CLK_RATE (19200000)
  129. #define I2C_STATUS_CLK_STATE 13
  130. #define QUP_OUT_FIFO_NOT_EMPTY 0x10
  131. #define I2C_GPIOS_DT_CNT (2) /* sda and scl */
  132. #if defined(CONFIG_MACH_KS01EUR) || defined(CONFIG_SEC_CHAGALL_PROJECT)
  133. /* Register:QUP_I2C_MASTER_CLK_CTL field setters */
  134. #define QUP_I2C_SCL_NOISE_REJECTION(reg_val, noise_rej_val) \
  135. (((reg_val) & ~(0x3 << 24)) | (((noise_rej_val) & 0x3) << 24))
  136. #define QUP_I2C_SDA_NOISE_REJECTION(reg_val, noise_rej_val) \
  137. (((reg_val) & ~(0x3 << 26)) | (((noise_rej_val) & 0x3) << 26))
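/*
 * Example (illustrative, not an original comment):
 * QUP_I2C_SCL_NOISE_REJECTION(0, 0x3) == 0x3 << 24, i.e. the SCL
 * noise-rejection level occupies bits [25:24] and the SDA level bits [27:26]
 * of QUP_I2C_MASTER_CLK_CTL.
 */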
  138. #endif
  139. static char const * const i2c_rsrcs[] = {"i2c_clk", "i2c_sda"};
  140. static struct gpiomux_setting recovery_config = {
  141. .func = GPIOMUX_FUNC_GPIO,
  142. .drv = GPIOMUX_DRV_8MA,
  143. .pull = GPIOMUX_PULL_NONE,
  144. };
  145. /**
  146. * qup_i2c_clk_path_vote: data to use bus scaling driver for clock path vote
  147. *
  148. * @client_hdl when zero, the client is not registered with the bus scaling
  149. * driver and bus scaling functionality should not be used. When non-zero, it
  150. * is a bus scaling client id and may be used to vote for the clock path.
  151. * @reg_err when true, a registration error was detected and an error message
  152. * was logged. i2c will attempt to re-register but will log the error only
  153. * once. Once registration succeeds, the flag is set to false.
  154. */
  155. struct qup_i2c_clk_path_vote {
  156. u32 client_hdl;
  157. struct msm_bus_scale_pdata *pdata;
  158. bool reg_err;
  159. };
  160. struct qup_i2c_dev {
  161. struct device *dev;
  162. void __iomem *base; /* virtual */
  163. void __iomem *gsbi; /* virtual */
  164. int in_irq;
  165. int out_irq;
  166. int err_irq;
  167. int num_irqs;
  168. struct clk *clk;
  169. struct clk *pclk;
  170. struct i2c_adapter adapter;
  171. struct i2c_msg *msg;
  172. int pos;
  173. int cnt;
  174. int err;
  175. int mode;
  176. int clk_ctl;
  177. int one_bit_t;
  178. int out_fifo_sz;
  179. int in_fifo_sz;
  180. int out_blk_sz;
  181. int in_blk_sz;
  182. int wr_sz;
  183. struct msm_i2c_platform_data *pdata;
  184. enum msm_i2c_state pwr_state;
  185. atomic_t xfer_progress;
  186. struct mutex mlock;
  187. void *complete;
  188. int i2c_gpios[ARRAY_SIZE(i2c_rsrcs)];
  189. struct qup_i2c_clk_path_vote clk_path_vote;
  190. };
  191. #ifdef CONFIG_PM
  192. static int i2c_qup_pm_resume_runtime(struct device *device);
  193. #endif
  194. #ifdef DEBUG
  195. static void
  196. qup_print_status(struct qup_i2c_dev *dev)
  197. {
  198. uint32_t val;
  199. val = readl_relaxed(dev->base+QUP_CONFIG);
  200. dev_dbg(dev->dev, "Qup config is :0x%x\n", val);
  201. val = readl_relaxed(dev->base+QUP_STATE);
  202. dev_dbg(dev->dev, "Qup state is :0x%x\n", val);
  203. val = readl_relaxed(dev->base+QUP_IO_MODE);
  204. dev_dbg(dev->dev, "Qup mode is :0x%x\n", val);
  205. }
  206. #else
  207. static inline void qup_print_status(struct qup_i2c_dev *dev)
  208. {
  209. }
  210. #endif
  211. static irqreturn_t
  212. qup_i2c_interrupt(int irq, void *devid)
  213. {
  214. struct qup_i2c_dev *dev = devid;
  215. uint32_t status = 0;
  216. uint32_t status1 = 0;
  217. uint32_t op_flgs = 0;
  218. int err = 0;
  219. if (atomic_read(&dev->xfer_progress) != 1) {
  220. dev_err(dev->dev, "irq:%d when PM suspended\n", irq);
  221. return IRQ_NONE;
  222. }
  223. status = readl_relaxed(dev->base + QUP_I2C_STATUS);
  224. status1 = readl_relaxed(dev->base + QUP_ERROR_FLAGS);
  225. op_flgs = readl_relaxed(dev->base + QUP_OPERATIONAL);
  226. if (!dev->msg || !dev->complete) {
  227. /* Clear Error interrupt if it's a level triggered interrupt*/
  228. if (dev->num_irqs == 1) {
  229. writel_relaxed(QUP_RESET_STATE, dev->base+QUP_STATE);
  230. /* Ensure that state is written before ISR exits */
  231. mb();
  232. }
  233. return IRQ_HANDLED;
  234. }
  235. if (status & I2C_STATUS_ERROR_MASK) {
  236. dev_err(dev->dev, "QUP: I2C status flags :0x%x, irq:%d\n",
  237. status, irq);
  238. err = status;
  239. /* Clear Error interrupt if it's a level triggered interrupt*/
  240. if (dev->num_irqs == 1) {
  241. writel_relaxed(QUP_RESET_STATE, dev->base+QUP_STATE);
  242. /* Ensure that state is written before ISR exits */
  243. mb();
  244. }
  245. goto intr_done;
  246. }
  247. if (status1 & 0x7F) {
  248. dev_err(dev->dev, "QUP: QUP status flags :0x%x\n", status1);
  249. err = -status1;
  250. /* Clear Error interrupt if it's a level triggered interrupt*/
  251. if (dev->num_irqs == 1) {
  252. writel_relaxed((status1 & QUP_STATUS_ERROR_FLAGS),
  253. dev->base + QUP_ERROR_FLAGS);
  254. /* Ensure that error flags are cleared before ISR
  255. * exits
  256. */
  257. mb();
  258. }
  259. goto intr_done;
  260. }
  261. if ((dev->num_irqs == 3) && (dev->msg->flags == I2C_M_RD)
  262. && (irq == dev->out_irq))
  263. return IRQ_HANDLED;
  264. if (op_flgs & QUP_OUT_SVC_FLAG) {
  265. writel_relaxed(QUP_OUT_SVC_FLAG, dev->base + QUP_OPERATIONAL);
  266. /* Ensure that service flag is acknowledged before ISR exits */
  267. mb();
  268. }
  269. if (dev->msg->flags == I2C_M_RD) {
  270. if ((op_flgs & QUP_MX_INPUT_DONE) ||
  271. (op_flgs & QUP_IN_SVC_FLAG)) {
  272. writel_relaxed(QUP_IN_SVC_FLAG, dev->base
  273. + QUP_OPERATIONAL);
  274. /* Ensure that service flag is acknowledged before ISR
  275. * exits
  276. */
  277. mb();
  278. } else
  279. return IRQ_HANDLED;
  280. }
  281. intr_done:
  282. dev_dbg(dev->dev, "QUP intr= %d, i2c status=0x%x, qup status = 0x%x\n",
  283. irq, status, status1);
  284. qup_print_status(dev);
  285. dev->err = err;
  286. complete(dev->complete);
  287. return IRQ_HANDLED;
  288. }
  289. static int
  290. qup_i2c_poll_state(struct qup_i2c_dev *dev, uint32_t req_state, bool only_valid)
  291. {
  292. uint32_t retries = 0;
  293. dev_dbg(dev->dev, "Polling for state:0x%x, or valid-only:%d\n",
  294. req_state, only_valid);
  295. while (retries != 2000) {
  296. uint32_t status = readl_relaxed(dev->base + QUP_STATE);
  297. /*
  298. * If only valid bit needs to be checked, requested state is
  299. * 'don't care'
  300. */
  301. if (status & QUP_STATE_VALID) {
  302. if (only_valid)
  303. return 0;
  304. else if ((req_state & QUP_I2C_MAST_GEN) &&
  305. (status & QUP_I2C_MAST_GEN))
  306. return 0;
  307. else if ((status & QUP_STATE_MASK) == req_state)
  308. return 0;
  309. }
  310. if (retries++ == 1000)
  311. udelay(100);
  312. }
  313. return -ETIMEDOUT;
  314. }
  315. static int
  316. qup_update_state(struct qup_i2c_dev *dev, uint32_t state)
  317. {
  318. if (qup_i2c_poll_state(dev, 0, true) != 0)
  319. return -EIO;
  320. writel_relaxed(state, dev->base + QUP_STATE);
  321. if (qup_i2c_poll_state(dev, state, false) != 0)
  322. return -EIO;
  323. return 0;
  324. }
  325. #define MSM_I2C_CLK_PATH_SUSPEND (0)
  326. #define MSM_I2C_CLK_PATH_RESUME (1)
  327. #define MSM_I2C_CLK_PATH_MAX_BW(dev) ((dev->pdata->src_clk_rate * 8) / 1000)
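/*
 * Example (illustrative, not an original comment): with the default 19.2 MHz
 * source clock, MSM_I2C_CLK_PATH_MAX_BW evaluates to (19200000 * 8) / 1000 =
 * 153600, used as the bandwidth value of the "resume" bus vector below.
 */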
  328. static int i2c_qup_clk_path_init(struct platform_device *pdev,
  329. struct qup_i2c_dev *dev)
  330. {
  331. struct msm_bus_vectors *paths = NULL;
  332. struct msm_bus_paths *usecases = NULL;
  333. if (!dev->pdata->master_id)
  334. return 0;
  335. dev_dbg(&pdev->dev, "initialises bus-scaling clock voting");
  336. paths = devm_kzalloc(&pdev->dev, sizeof(*paths) * 2, GFP_KERNEL);
  337. if (!paths) {
  338. dev_err(&pdev->dev,
  339. "msm_bus_paths.paths memory allocation failed");
  340. return -ENOMEM;
  341. }
  342. usecases = devm_kzalloc(&pdev->dev, sizeof(*usecases) * 2, GFP_KERNEL);
  343. if (!usecases) {
  344. dev_err(&pdev->dev,
  345. "msm_bus_scale_pdata.usecases memory allocation failed");
  346. goto path_init_err;
  347. }
  348. dev->clk_path_vote.pdata = devm_kzalloc(&pdev->dev,
  349. sizeof(*dev->clk_path_vote.pdata),
  350. GFP_KERNEL);
  351. if (!dev->clk_path_vote.pdata) {
  352. dev_err(&pdev->dev,
  353. "msm_bus_scale_pdata memory allocation failed");
  354. goto path_init_err;
  355. }
  356. paths[MSM_I2C_CLK_PATH_SUSPEND] = (struct msm_bus_vectors) {
  357. dev->pdata->master_id, MSM_BUS_SLAVE_EBI_CH0, 0, 0
  358. };
  359. paths[MSM_I2C_CLK_PATH_RESUME] = (struct msm_bus_vectors) {
  360. dev->pdata->master_id, MSM_BUS_SLAVE_EBI_CH0, 0,
  361. MSM_I2C_CLK_PATH_MAX_BW(dev)
  362. };
  363. usecases[MSM_I2C_CLK_PATH_SUSPEND] = (struct msm_bus_paths) {
  364. .num_paths = 1,
  365. .vectors = &paths[MSM_I2C_CLK_PATH_SUSPEND],
  366. };
  367. usecases[MSM_I2C_CLK_PATH_RESUME] = (struct msm_bus_paths) {
  368. .num_paths = 1,
  369. .vectors = &paths[MSM_I2C_CLK_PATH_RESUME],
  370. };
  371. *dev->clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
  372. .active_only = dev->pdata->active_only,
  373. .name = pdev->name,
  374. .num_usecases = 2,
  375. .usecase = usecases,
  376. };
  377. return 0;
  378. path_init_err:
  379. devm_kfree(&pdev->dev, paths);
  380. devm_kfree(&pdev->dev, usecases);
  381. devm_kfree(&pdev->dev, dev->clk_path_vote.pdata);
  382. dev->clk_path_vote.pdata = NULL;
  383. return -ENOMEM;
  384. }
  385. static void i2c_qup_clk_path_teardown(struct qup_i2c_dev *dev)
  386. {
  387. if (dev->clk_path_vote.client_hdl) {
  388. msm_bus_scale_unregister_client(dev->clk_path_vote.client_hdl);
  389. dev->clk_path_vote.client_hdl = 0;
  390. }
  391. }
  392. static void i2c_qup_clk_path_vote(struct qup_i2c_dev *dev)
  393. {
  394. if (dev->clk_path_vote.client_hdl)
  395. msm_bus_scale_client_update_request(
  396. dev->clk_path_vote.client_hdl,
  397. MSM_I2C_CLK_PATH_RESUME);
  398. }
  399. static void i2c_qup_clk_path_unvote(struct qup_i2c_dev *dev)
  400. {
  401. if (dev->clk_path_vote.client_hdl)
  402. msm_bus_scale_client_update_request(
  403. dev->clk_path_vote.client_hdl,
  404. MSM_I2C_CLK_PATH_SUSPEND);
  405. }
  406. /**
  407. * i2c_qup_clk_path_postponed_register: reg with bus-scaling after it is probed
  408. *
  409. * Workaround: i2c driver may be probed before the bus scaling driver. Thus,
  410. * this function should be called not from probe but from a later context.
  411. * This function may be called more than once before registration succeeds. In
  412. * that case only one error message will be logged. At boot time all clocks
  413. * are on, so earlier i2c transactions should succeed.
  414. */
  415. static void i2c_qup_clk_path_postponed_register(struct qup_i2c_dev *dev)
  416. {
  417. /*
  418. * bail out if path voting is disabled (master_id == 0) or if it is
  419. * already registered (client_hdl != 0)
  420. */
  421. if (!dev->pdata->master_id || dev->clk_path_vote.client_hdl)
  422. return;
  423. dev->clk_path_vote.client_hdl = msm_bus_scale_register_client(
  424. dev->clk_path_vote.pdata);
  425. if (dev->clk_path_vote.client_hdl) {
  426. if (dev->clk_path_vote.reg_err) {
  427. /* log a success message if an error msg was logged */
  428. dev->clk_path_vote.reg_err = false;
  429. dev_info(dev->dev,
  430. "msm_bus_scale_register_client(mstr-id:%d "
  431. "actv-only:%d):0x%x",
  432. dev->pdata->master_id, dev->pdata->active_only,
  433. dev->clk_path_vote.client_hdl);
  434. }
  435. if (dev->pdata->active_only)
  436. i2c_qup_clk_path_vote(dev);
  437. } else {
  438. /* guard to log only one error on multiple failure */
  439. if (!dev->clk_path_vote.reg_err) {
  440. dev->clk_path_vote.reg_err = true;
  441. dev_info(dev->dev,
  442. "msm_bus_scale_register_client(mstr-id:%d "
  443. "actv-only:%d):0",
  444. dev->pdata->master_id, dev->pdata->active_only);
  445. }
  446. }
  447. }
  448. static int i2c_qup_gpio_request(struct qup_i2c_dev *dev)
  449. {
  450. int i;
  451. int result = 0;
  452. for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
  453. if (dev->i2c_gpios[i] >= 0) {
  454. result = gpio_request(dev->i2c_gpios[i], i2c_rsrcs[i]);
  455. if (result) {
  456. dev_err(dev->dev,
  457. "gpio_request for pin %d failed with error %d\n",
  458. dev->i2c_gpios[i], result);
  459. goto error;
  460. }
  461. }
  462. }
  463. return 0;
  464. error:
  465. for (; --i >= 0;) {
  466. if (dev->i2c_gpios[i] >= 0)
  467. gpio_free(dev->i2c_gpios[i]);
  468. }
  469. return result;
  470. }
  471. static void i2c_qup_gpio_free(struct qup_i2c_dev *dev)
  472. {
  473. int i;
  474. for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
  475. if (dev->i2c_gpios[i] >= 0)
  476. gpio_free(dev->i2c_gpios[i]);
  477. }
  478. }
  479. static void i2c_qup_pm_suspend_clk(struct qup_i2c_dev *dev)
  480. {
  481. uint32_t status;
  482. /* reset core and enable conditional dynamic clock gating */
  483. qup_update_state(dev, QUP_RESET_STATE);
  484. status = readl_relaxed(dev->base + QUP_CONFIG);
  485. status |= I2C_CORE_CLK_ON_EN;
  486. writel_relaxed(status, dev->base + QUP_CONFIG);
  487. /* ensure that write has really gone through */
  488. mb();
  489. clk_disable_unprepare(dev->clk);
  490. if (!dev->pdata->keep_ahb_clk_on)
  491. clk_disable_unprepare(dev->pclk);
  492. }
  493. static void i2c_qup_pm_resume_clk(struct qup_i2c_dev *dev)
  494. {
  495. clk_prepare_enable(dev->clk);
  496. if (!dev->pdata->keep_ahb_clk_on)
  497. clk_prepare_enable(dev->pclk);
  498. }
  499. static void i2c_qup_pm_suspend(struct qup_i2c_dev *dev)
  500. {
  501. if (dev->pwr_state == MSM_I2C_PM_SUSPENDED) {
  502. dev_err(dev->dev, "attempt to suspend when suspended\n");
  503. return;
  504. }
  505. if (!dev->pdata->clk_ctl_xfer)
  506. i2c_qup_pm_suspend_clk(dev);
  507. if (!dev->pdata->active_only)
  508. i2c_qup_clk_path_unvote(dev);
  509. i2c_qup_gpio_free(dev);
  510. dev->pwr_state = MSM_I2C_PM_SUSPENDED;
  511. }
  512. static void i2c_qup_pm_resume(struct qup_i2c_dev *dev)
  513. {
  514. if (dev->pwr_state == MSM_I2C_PM_ACTIVE)
  515. return;
  516. i2c_qup_gpio_request(dev);
  517. i2c_qup_clk_path_postponed_register(dev);
  518. if (!dev->pdata->active_only)
  519. i2c_qup_clk_path_vote(dev);
  520. if (!dev->pdata->clk_ctl_xfer)
  521. i2c_qup_pm_resume_clk(dev);
  522. dev->pwr_state = MSM_I2C_PM_ACTIVE;
  523. }
  524. static int
  525. qup_i2c_poll_writeready(struct qup_i2c_dev *dev, int rem)
  526. {
  527. uint32_t retries = 0;
  528. while (retries != 2000) {
  529. uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
  530. if (!(status & I2C_STATUS_WR_BUFFER_FULL)) {
  531. if (((dev->msg->flags & I2C_M_RD) || (rem == 0)) &&
  532. !(status & I2C_STATUS_BUS_ACTIVE))
  533. return 0;
  534. else if ((dev->msg->flags == 0) && (rem > 0))
  535. return 0;
  536. else /* 1-bit delay before we check for bus busy */
  537. udelay(dev->one_bit_t);
  538. }
  539. if (retries++ == 1000) {
  540. /*
  541. * Wait for FIFO number of bytes to be absolutely sure
  542. * that I2C write state machine is not idle. Each byte
  543. * takes 9 clock cycles. (8 bits + 1 ack)
  544. */
  545. usleep_range((dev->one_bit_t * (dev->out_fifo_sz * 9)),
  546. (dev->one_bit_t * (dev->out_fifo_sz * 9)));
  547. }
  548. }
  549. qup_print_status(dev);
  550. return -ETIMEDOUT;
  551. }
  552. static int qup_i2c_poll_clock_ready(struct qup_i2c_dev *dev)
  553. {
  554. uint32_t retries = 0;
  555. uint32_t op_flgs = -1, clk_state = -1;
  556. /*
  557. * Wait for the clock state to transition to either IDLE or FORCED
  558. * LOW. This will usually happen within one cycle of the i2c clock.
  559. */
  560. while (retries++ < QUP_MAX_CLK_STATE_RETRIES) {
  561. uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
  562. clk_state = (status >> I2C_STATUS_CLK_STATE) & 0x7;
  563. /* Read the operational register */
  564. op_flgs = readl_relaxed(dev->base +
  565. QUP_OPERATIONAL) & QUP_OUT_FIFO_NOT_EMPTY;
  566. /*
  567. * In a corner case the slave may stretch the clock while the
  568. * output fifo has one block of space free at the same time. The
  569. * i2c qup then gets an output service interrupt even though it
  570. * has no more data to be written, which can leave the output
  571. * fifo not empty.
  572. */
  573. if (op_flgs == 0 &&
  574. (clk_state == I2C_CLK_RESET_BUSIDLE_STATE ||
  575. clk_state == I2C_CLK_FORCED_LOW_STATE)){
  576. dev_dbg(dev->dev, "clk_state 0x%x op_flgs [%x]\n",
  577. clk_state, op_flgs);
  578. return 0;
  579. }
  580. /* 1-bit delay before we check again */
  581. udelay(dev->one_bit_t);
  582. }
  583. dev_err(dev->dev, "Error waiting for clk ready clk_state: 0x%x op_flgs: 0x%x\n",
  584. clk_state, op_flgs);
  585. return -ETIMEDOUT;
  586. }
  587. #ifdef DEBUG
  588. static void qup_verify_fifo(struct qup_i2c_dev *dev, uint32_t val,
  589. uint32_t addr, int rdwr)
  590. {
  591. if (rdwr)
  592. dev_dbg(dev->dev, "RD:Wrote 0x%x to out_ff:0x%x\n", val, addr);
  593. else
  594. dev_dbg(dev->dev, "WR:Wrote 0x%x to out_ff:0x%x\n", val, addr);
  595. }
  596. #else
  597. static inline void qup_verify_fifo(struct qup_i2c_dev *dev, uint32_t val,
  598. uint32_t addr, int rdwr)
  599. {
  600. }
  601. #endif
  602. static void
  603. qup_issue_read(struct qup_i2c_dev *dev, struct i2c_msg *msg, int *idx,
  604. uint32_t carry_over)
  605. {
  606. uint16_t addr = (msg->addr << 1) | 1;
  607. /* QUP limits reads to 256 bytes per transfer. By HW design, 0 in the 8-bit
  608. * field is treated as a 256-byte read.
  609. */
  610. uint16_t rd_len = ((dev->cnt == 256) ? 0 : dev->cnt);
  611. if (*idx % 4) {
  612. writel_relaxed(carry_over | ((QUP_OUT_START | addr) << 16),
  613. dev->base + QUP_OUT_FIFO_BASE);/* + (*idx-2)); */
  614. qup_verify_fifo(dev, carry_over |
  615. ((QUP_OUT_START | addr) << 16), (uint32_t)dev->base
  616. + QUP_OUT_FIFO_BASE + (*idx - 2), 1);
  617. writel_relaxed((QUP_OUT_REC | rd_len),
  618. dev->base + QUP_OUT_FIFO_BASE);/* + (*idx+2)); */
  619. qup_verify_fifo(dev, (QUP_OUT_REC | rd_len),
  620. (uint32_t)dev->base + QUP_OUT_FIFO_BASE + (*idx + 2), 1);
  621. } else {
  622. writel_relaxed(((QUP_OUT_REC | rd_len) << 16)
  623. | QUP_OUT_START | addr,
  624. dev->base + QUP_OUT_FIFO_BASE);/* + (*idx)); */
  625. qup_verify_fifo(dev, QUP_OUT_REC << 16 | rd_len << 16 |
  626. QUP_OUT_START | addr,
  627. (uint32_t)dev->base + QUP_OUT_FIFO_BASE + (*idx), 1);
  628. }
  629. *idx += 4;
  630. }
  631. static void
  632. qup_issue_write(struct qup_i2c_dev *dev, struct i2c_msg *msg, int rem,
  633. int *idx, uint32_t *carry_over)
  634. {
  635. int entries = dev->cnt;
  636. int empty_sl = dev->wr_sz - ((*idx) >> 1);
  637. int i = 0;
  638. uint32_t val = 0;
  639. uint32_t last_entry = 0;
  640. uint16_t addr = msg->addr << 1;
  641. if (dev->pos == 0) {
  642. if (*idx % 4) {
  643. writel_relaxed(*carry_over | ((QUP_OUT_START |
  644. addr) << 16),
  645. dev->base + QUP_OUT_FIFO_BASE);
  646. qup_verify_fifo(dev, *carry_over | QUP_OUT_START << 16 |
  647. addr << 16, (uint32_t)dev->base +
  648. QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
  649. } else
  650. val = QUP_OUT_START | addr;
  651. *idx += 2;
  652. i++;
  653. entries++;
  654. } else {
  655. /* Avoid setup time issue by adding 1 NOP when the number of bytes
  656. * is more than FIFO/BLOCK size. The setup time issue can't appear
  657. * otherwise since the next byte to be written will always be ready
  658. */
  659. val = (QUP_OUT_NOP | 1);
  660. *idx += 2;
  661. i++;
  662. entries++;
  663. }
  664. if (entries > empty_sl)
  665. entries = empty_sl;
  666. for (; i < (entries - 1); i++) {
  667. if (*idx % 4) {
  668. writel_relaxed(val | ((QUP_OUT_DATA |
  669. msg->buf[dev->pos]) << 16),
  670. dev->base + QUP_OUT_FIFO_BASE);
  671. qup_verify_fifo(dev, val | QUP_OUT_DATA << 16 |
  672. msg->buf[dev->pos] << 16, (uint32_t)dev->base +
  673. QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
  674. } else
  675. val = QUP_OUT_DATA | msg->buf[dev->pos];
  676. (*idx) += 2;
  677. dev->pos++;
  678. }
  679. if (dev->pos < (msg->len - 1))
  680. last_entry = QUP_OUT_DATA;
  681. else if (rem > 1) /* not last array entry */
  682. last_entry = QUP_OUT_DATA;
  683. else
  684. last_entry = QUP_OUT_STOP;
  685. if ((*idx % 4) == 0) {
  686. /*
  687. * If read-start and read-command end up in different fifos, it
  688. * may result in an extra byte being read due to an extra read cycle.
  689. * Avoid that by inserting a NOP as the last entry of the fifo only
  690. * if the write command(s) leave 1 space in the fifo.
  691. */
  692. if (rem > 1) {
  693. struct i2c_msg *next = msg + 1;
  694. if (next->addr == msg->addr && (next->flags & I2C_M_RD)
  695. && *idx == ((dev->wr_sz*2) - 4)) {
  696. writel_relaxed(((last_entry |
  697. msg->buf[dev->pos]) |
  698. ((1 | QUP_OUT_NOP) << 16)), dev->base +
  699. QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */
  700. qup_verify_fifo(dev,
  701. ((last_entry | msg->buf[dev->pos]) |
  702. ((1 | QUP_OUT_NOP) << 16)),
  703. (uint32_t)dev->base +
  704. QUP_OUT_FIFO_BASE + (*idx), 0);
  705. *idx += 2;
  706. } else if ((dev->pos == msg->len - 1)
  707. && *idx < (dev->wr_sz*2) &&
  708. (next->addr != msg->addr)) {
  709. /* Last byte of an intermittent write */
  710. writel_relaxed((QUP_OUT_STOP |
  711. msg->buf[dev->pos]),
  712. dev->base + QUP_OUT_FIFO_BASE);
  713. qup_verify_fifo(dev,
  714. QUP_OUT_STOP | msg->buf[dev->pos],
  715. (uint32_t)dev->base +
  716. QUP_OUT_FIFO_BASE + (*idx), 0);
  717. *idx += 2;
  718. } else
  719. *carry_over = (last_entry | msg->buf[dev->pos]);
  720. } else {
  721. writel_relaxed((last_entry | msg->buf[dev->pos]),
  722. dev->base + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */
  723. qup_verify_fifo(dev, last_entry | msg->buf[dev->pos],
  724. (uint32_t)dev->base + QUP_OUT_FIFO_BASE +
  725. (*idx), 0);
  726. }
  727. } else {
  728. writel_relaxed(val | ((last_entry | msg->buf[dev->pos]) << 16),
  729. dev->base + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */
  730. qup_verify_fifo(dev, val | (last_entry << 16) |
  731. (msg->buf[dev->pos] << 16), (uint32_t)dev->base +
  732. QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
  733. }
  734. *idx += 2;
  735. dev->pos++;
  736. dev->cnt = msg->len - dev->pos;
  737. }
  738. static void
  739. qup_set_read_mode(struct qup_i2c_dev *dev, int rd_len)
  740. {
  741. uint32_t wr_mode = (dev->wr_sz < dev->out_fifo_sz) ?
  742. QUP_WR_BLK_MODE : 0;
  743. if (rd_len > 256) {
  744. dev_dbg(dev->dev, "HW limit: Breaking reads in chunk of 256\n");
  745. rd_len = 256;
  746. }
  747. if (rd_len <= dev->in_fifo_sz) {
  748. writel_relaxed(wr_mode | QUP_PACK_EN | QUP_UNPACK_EN,
  749. dev->base + QUP_IO_MODE);
  750. writel_relaxed(rd_len, dev->base + QUP_MX_READ_CNT);
  751. } else {
  752. writel_relaxed(wr_mode | QUP_RD_BLK_MODE |
  753. QUP_PACK_EN | QUP_UNPACK_EN, dev->base + QUP_IO_MODE);
  754. writel_relaxed(rd_len, dev->base + QUP_MX_INPUT_CNT);
  755. }
  756. }
  757. static int
  758. qup_set_wr_mode(struct qup_i2c_dev *dev, int rem)
  759. {
  760. int total_len = 0;
  761. int ret = 0;
  762. int len = dev->msg->len;
  763. struct i2c_msg *next = NULL;
  764. if (rem > 1)
  765. next = dev->msg + 1;
  766. while (rem > 1 && next->flags == 0 && (next->addr == dev->msg->addr)) {
  767. len += next->len + 1;
  768. next = next + 1;
  769. rem--;
  770. }
  771. if (len >= (dev->out_fifo_sz - 1)) {
  772. total_len = len + 1 + (len/(dev->out_blk_sz-1));
  773. writel_relaxed(QUP_WR_BLK_MODE | QUP_PACK_EN | QUP_UNPACK_EN,
  774. dev->base + QUP_IO_MODE);
  775. dev->wr_sz = dev->out_blk_sz;
  776. } else
  777. writel_relaxed(QUP_PACK_EN | QUP_UNPACK_EN,
  778. dev->base + QUP_IO_MODE);
  779. if (rem > 1) {
  780. if (next->addr == dev->msg->addr &&
  781. next->flags == I2C_M_RD) {
  782. qup_set_read_mode(dev, next->len);
  783. /* make sure read start & read command are in 1 blk */
  784. if ((total_len % dev->out_blk_sz) ==
  785. (dev->out_blk_sz - 1))
  786. total_len += 3;
  787. else
  788. total_len += 2;
  789. }
  790. }
  791. /* WRITE COUNT register valid/used only in block mode */
  792. if (dev->wr_sz == dev->out_blk_sz)
  793. writel_relaxed(total_len, dev->base + QUP_MX_WR_CNT);
  794. return ret;
  795. }
  796. static void qup_i2c_recover_bus_busy(struct qup_i2c_dev *dev)
  797. {
  798. int i;
  799. int gpio_clk;
  800. int gpio_dat;
  801. bool gpio_clk_status = false;
  802. uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
  803. struct gpiomux_setting old_gpio_setting[ARRAY_SIZE(i2c_rsrcs)];
  804. if (dev->pdata->msm_i2c_config_gpio)
  805. return;
  806. if (!(status & (I2C_STATUS_BUS_ACTIVE)) ||
  807. (status & (I2C_STATUS_BUS_MASTER)))
  808. return;
  809. gpio_clk = dev->i2c_gpios[0];
  810. gpio_dat = dev->i2c_gpios[1];
  811. if ((gpio_clk == -1) && (gpio_dat == -1)) {
  812. dev_err(dev->dev, "Recovery failed due to undefined GPIO's\n");
  813. return;
  814. }
  815. disable_irq(dev->err_irq);
  816. for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
  817. if (msm_gpiomux_write(dev->i2c_gpios[i], GPIOMUX_ACTIVE,
  818. &recovery_config, &old_gpio_setting[i])) {
  819. dev_err(dev->dev, "GPIO pins have no active setting\n");
  820. goto recovery_end;
  821. }
  822. }
  823. dev_warn(dev->dev, "i2c_scl: %d, i2c_sda: %d\n",
  824. gpio_get_value(gpio_clk), gpio_get_value(gpio_dat));
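/*
 * Toggle SCL up to 9 times; per the standard I2C bus-clear procedure, a slave
 * stuck driving SDA low releases it once it has clocked out the rest of its
 * byte plus the ACK bit. (Descriptive comment added; not in the original.)
 */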
  825. for (i = 0; i < 9; i++) {
  826. if (gpio_get_value(gpio_dat) && gpio_clk_status)
  827. break;
  828. gpio_direction_output(gpio_clk, 0);
  829. udelay(5);
  830. gpio_direction_output(gpio_dat, 0);
  831. udelay(5);
  832. gpio_direction_input(gpio_clk);
  833. udelay(5);
  834. if (!gpio_get_value(gpio_clk))
  835. udelay(20);
  836. if (!gpio_get_value(gpio_clk))
  837. usleep_range(10000, 10000);
  838. gpio_clk_status = gpio_get_value(gpio_clk);
  839. gpio_direction_input(gpio_dat);
  840. udelay(5);
  841. }
  842. /* Configure ALT function to QUP I2C */
  843. for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
  844. msm_gpiomux_write(dev->i2c_gpios[i], GPIOMUX_ACTIVE,
  845. &old_gpio_setting[i], NULL);
  846. }
  847. udelay(10);
  848. status = readl_relaxed(dev->base + QUP_I2C_STATUS);
  849. if (!(status & I2C_STATUS_BUS_ACTIVE)) {
  850. dev_info(dev->dev, "Bus busy cleared after %d clock cycles, "
  851. "status %x\n",
  852. i, status);
  853. goto recovery_end;
  854. }
  855. dev_warn(dev->dev, "Bus still busy, status %x\n", status);
  856. recovery_end:
  857. enable_irq(dev->err_irq);
  858. }
  859. static int
  860. qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
  861. {
  862. DECLARE_COMPLETION_ONSTACK(complete);
  863. struct qup_i2c_dev *dev = i2c_get_adapdata(adap);
  864. int ret;
  865. int rem = num;
  866. long timeout;
  867. int err;
  868. /*
  869. * If all slaves of this controller behave as expected, they will
  870. * implement suspend and won't initiate any transaction if they are
  871. * suspended. Since the controller is their parent, the controller's
  872. * suspend will be called only AFTER all slaves are suspended.
  873. * However, reality is different and some slaves don't implement suspend.
  874. * If a slave tries to initiate a transfer while we are suspended,
  875. * pm_runtime_enabled is set to false by system-pm.
  876. * Make sure we return an error when a transaction is initiated while
  877. * we are in the suspended state.
  878. */
  879. mutex_lock(&dev->mlock);
  880. if (dev->pwr_state >= MSM_I2C_SYS_SUSPENDING) {
  881. dev_err(dev->dev,
  882. "xfer not allowed when ctrl is suspended addr:0x%x\n",
  883. msgs->addr);
  884. mutex_unlock(&dev->mlock);
  885. return -EIO;
  886. }
  887. if (!pm_runtime_enabled(dev->dev)) {
  888. dev_dbg(dev->dev, "Runtime PM FEATURE is disabled\n");
  889. i2c_qup_pm_resume(dev);
  890. } else {
  891. pm_runtime_get_sync(dev->dev);
  892. }
  893. if (dev->pdata->clk_ctl_xfer)
  894. i2c_qup_pm_resume_clk(dev);
  895. atomic_set(&dev->xfer_progress, 1);
  896. /* Initialize QUP registers during first transfer */
  897. if (dev->clk_ctl == 0) {
  898. int fs_div;
  899. int hs_div;
  900. uint32_t fifo_reg;
  901. if (dev->gsbi) {
  902. writel_relaxed(0x2 << 4, dev->gsbi);
  903. /* GSBI memory is not in the same 1K region as other
  904. * QUP registers. mb() here ensures that the GSBI
  905. * register is updated in correct order and that the
  906. * write has gone through before programming QUP core
  907. * registers
  908. */
  909. mb();
  910. }
  911. fs_div = ((dev->pdata->src_clk_rate
  912. / dev->pdata->clk_freq) / 2) - 3;
  913. hs_div = 3;
  914. dev->clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
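/*
 * Example (illustrative, not an original comment): with src_clk_rate = 19.2 MHz
 * and clk_freq = 100 kHz, fs_div = (19200000 / 100000) / 2 - 3 = 93 and
 * hs_div = 3, so QUP_I2C_CLK_CTL is later programmed with (3 << 8) | 93 = 0x35D.
 */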
  915. #if defined(CONFIG_MACH_KS01EUR) || defined(CONFIG_SEC_CHAGALL_PROJECT)
  916. dev->clk_ctl = QUP_I2C_SCL_NOISE_REJECTION(
  917. dev->clk_ctl, dev->pdata->noise_rjct_scl);
  918. dev->clk_ctl = QUP_I2C_SDA_NOISE_REJECTION(
  919. dev->clk_ctl, dev->pdata->noise_rjct_sda);
  920. #endif
  921. fifo_reg = readl_relaxed(dev->base + QUP_IO_MODE);
  922. if (fifo_reg & 0x3)
  923. dev->out_blk_sz = (fifo_reg & 0x3) * 16;
  924. else
  925. dev->out_blk_sz = 16;
  926. if (fifo_reg & 0x60)
  927. dev->in_blk_sz = ((fifo_reg & 0x60) >> 5) * 16;
  928. else
  929. dev->in_blk_sz = 16;
  930. /*
  931. * The block/fifo size w.r.t. 'actual data' is 1/2 due to 'tag'
  932. * associated with each byte written/received
  933. */
  934. dev->out_blk_sz /= 2;
  935. dev->in_blk_sz /= 2;
  936. dev->out_fifo_sz = dev->out_blk_sz *
  937. (2 << ((fifo_reg & 0x1C) >> 2));
  938. dev->in_fifo_sz = dev->in_blk_sz *
  939. (2 << ((fifo_reg & 0x380) >> 7));
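/*
 * Example (illustrative, not an original comment): if QUP_IO_MODE reports an
 * output block field of 0 and an output FIFO field of 2, then out_blk_sz =
 * 16 / 2 = 8 data bytes and out_fifo_sz = 8 * (2 << 2) = 64 data bytes
 * (halved because every data byte is paired with a tag).
 */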
  940. dev_dbg(dev->dev, "QUP IN:bl:%d, ff:%d, OUT:bl:%d, ff:%d\n",
  941. dev->in_blk_sz, dev->in_fifo_sz,
  942. dev->out_blk_sz, dev->out_fifo_sz);
  943. }
  944. writel_relaxed(1, dev->base + QUP_SW_RESET);
  945. ret = qup_i2c_poll_state(dev, QUP_RESET_STATE, false);
  946. if (ret) {
  947. dev_err(dev->dev, "QUP Busy:Trying to recover\n");
  948. goto out_err;
  949. }
  950. if (dev->num_irqs == 3) {
  951. enable_irq(dev->in_irq);
  952. enable_irq(dev->out_irq);
  953. }
  954. enable_irq(dev->err_irq);
  955. /* Initialize QUP registers */
  956. writel_relaxed(0, dev->base + QUP_CONFIG);
  957. writel_relaxed(QUP_OPERATIONAL_RESET, dev->base + QUP_OPERATIONAL);
  958. writel_relaxed(QUP_STATUS_ERROR_FLAGS, dev->base + QUP_ERROR_FLAGS_EN);
  959. writel_relaxed(I2C_MINI_CORE | I2C_N_VAL, dev->base + QUP_CONFIG);
  960. /* Initialize I2C mini core registers */
  961. writel_relaxed(0, dev->base + QUP_I2C_CLK_CTL);
  962. writel_relaxed(QUP_I2C_STATUS_RESET, dev->base + QUP_I2C_STATUS);
  963. while (rem) {
  964. bool filled = false;
  965. dev->cnt = msgs->len - dev->pos;
  966. dev->msg = msgs;
  967. dev->wr_sz = dev->out_fifo_sz;
  968. dev->err = 0;
  969. dev->complete = &complete;
  970. if (qup_i2c_poll_state(dev, QUP_I2C_MAST_GEN, false) != 0) {
  971. ret = -EIO;
  972. goto out_err;
  973. }
  974. qup_print_status(dev);
  975. /* HW limits reads to 256 bytes in one read without a stop */
  976. if (dev->msg->flags & I2C_M_RD) {
  977. qup_set_read_mode(dev, dev->cnt);
  978. if (dev->cnt > 256)
  979. dev->cnt = 256;
  980. } else {
  981. ret = qup_set_wr_mode(dev, rem);
  982. if (ret != 0)
  983. goto out_err;
  984. /* Don't fill block till we get interrupt */
  985. if (dev->wr_sz == dev->out_blk_sz)
  986. filled = true;
  987. }
  988. err = qup_update_state(dev, QUP_RUN_STATE);
  989. if (err < 0) {
  990. ret = err;
  991. goto out_err;
  992. }
  993. qup_print_status(dev);
  994. writel_relaxed(dev->clk_ctl, dev->base + QUP_I2C_CLK_CTL);
  995. /* CLK_CTL register is not in the same 1K region as other QUP
  996. * registers. Ensure that clock control is written before
  997. * programming other QUP registers
  998. */
  999. mb();
  1000. do {
  1001. int idx = 0;
  1002. uint32_t carry_over = 0;
  1003. /* Transition to PAUSE state only possible from RUN */
  1004. err = qup_update_state(dev, QUP_PAUSE_STATE);
  1005. if (err < 0) {
  1006. ret = err;
  1007. goto out_err;
  1008. }
  1009. qup_print_status(dev);
  1010. /* This operation is Write, check the next operation
  1011. * and decide mode
  1012. */
  1013. while (filled == false) {
  1014. if ((msgs->flags & I2C_M_RD))
  1015. qup_issue_read(dev, msgs, &idx,
  1016. carry_over);
  1017. else if (!(msgs->flags & I2C_M_RD))
  1018. qup_issue_write(dev, msgs, rem, &idx,
  1019. &carry_over);
  1020. if (idx >= (dev->wr_sz << 1))
  1021. filled = true;
  1022. /* Start new message */
  1023. if (filled == false) {
  1024. if (msgs->flags & I2C_M_RD)
  1025. filled = true;
  1026. else if (rem > 1) {
  1027. /* Only combine operations with
  1028. * same address
  1029. */
  1030. struct i2c_msg *next = msgs + 1;
  1031. if (next->addr != msgs->addr)
  1032. filled = true;
  1033. else {
  1034. rem--;
  1035. msgs++;
  1036. dev->msg = msgs;
  1037. dev->pos = 0;
  1038. dev->cnt = msgs->len;
  1039. if (msgs->len > 256)
  1040. dev->cnt = 256;
  1041. }
  1042. } else
  1043. filled = true;
  1044. }
  1045. }
  1046. err = qup_update_state(dev, QUP_RUN_STATE);
  1047. if (err < 0) {
  1048. ret = err;
  1049. goto out_err;
  1050. }
  1051. dev_dbg(dev->dev, "idx:%d, rem:%d, num:%d, mode:%d\n",
  1052. idx, rem, num, dev->mode);
  1053. qup_print_status(dev);
  1054. timeout = wait_for_completion_timeout(&complete,
  1055. msecs_to_jiffies(dev->out_fifo_sz));
  1056. if (!timeout) {
  1057. uint32_t istatus = readl_relaxed(dev->base +
  1058. QUP_I2C_STATUS);
  1059. uint32_t qstatus = readl_relaxed(dev->base +
  1060. QUP_ERROR_FLAGS);
  1061. uint32_t op_flgs = readl_relaxed(dev->base +
  1062. QUP_OPERATIONAL);
  1063. /*
  1064. * Don't wait for 1 sec if i2c sees the bus
  1065. * active and the controller is not master.
  1066. * A slave has pulled the line low. Try to recover.
  1067. */
  1068. if (!(istatus & I2C_STATUS_BUS_ACTIVE) ||
  1069. (istatus & I2C_STATUS_BUS_MASTER)) {
  1070. timeout =
  1071. wait_for_completion_timeout(&complete,
  1072. HZ);
  1073. if (timeout)
  1074. goto timeout_err;
  1075. }
  1076. qup_i2c_recover_bus_busy(dev);
  1077. dev_err(dev->dev,
  1078. "Transaction timed out, SL-AD = 0x%x\n",
  1079. dev->msg->addr);
  1080. dev_err(dev->dev, "I2C Status: %x\n", istatus);
  1081. dev_err(dev->dev, "QUP Status: %x\n", qstatus);
  1082. dev_err(dev->dev, "OP Flags: %x\n", op_flgs);
  1083. writel_relaxed(1, dev->base + QUP_SW_RESET);
  1084. /* Make sure that the write has gone through
  1085. * before returning from the function
  1086. */
  1087. mb();
  1088. ret = -ETIMEDOUT;
  1089. goto out_err;
  1090. }
  1091. timeout_err:
  1092. if (dev->err) {
  1093. if (dev->err > 0 &&
  1094. dev->err & QUP_I2C_NACK_FLAG) {
  1095. dev_err(dev->dev,
  1096. "I2C slave addr:0x%x not connected\n",
  1097. dev->msg->addr);
  1098. dev->err = ENOTCONN;
  1099. } else if (dev->err < 0) {
  1100. dev_err(dev->dev,
  1101. "QUP data xfer error %d\n", dev->err);
  1102. ret = dev->err;
  1103. goto out_err;
  1104. } else if (dev->err > 0) {
  1105. /*
  1106. * ISR returns +ve error if error code
  1107. * is I2C related, e.g. unexpected start
  1108. * So you may call recover-bus-busy when
  1109. * this error happens
  1110. */
  1111. qup_i2c_recover_bus_busy(dev);
  1112. }
  1113. ret = -dev->err;
  1114. goto out_err;
  1115. }
  1116. if (dev->msg->flags & I2C_M_RD) {
  1117. int i;
  1118. uint32_t dval = 0;
  1119. for (i = 0; dev->pos < dev->msg->len; i++,
  1120. dev->pos++) {
  1121. uint32_t rd_status =
  1122. readl_relaxed(dev->base
  1123. + QUP_OPERATIONAL);
  1124. if (i % 2 == 0) {
  1125. if ((rd_status &
  1126. QUP_IN_NOT_EMPTY) == 0)
  1127. break;
  1128. dval = readl_relaxed(dev->base +
  1129. QUP_IN_FIFO_BASE);
  1130. dev->msg->buf[dev->pos] =
  1131. dval & 0xFF;
  1132. } else
  1133. dev->msg->buf[dev->pos] =
  1134. ((dval & 0xFF0000) >>
  1135. 16);
  1136. }
  1137. dev->cnt -= i;
  1138. } else
  1139. filled = false; /* refill output FIFO */
  1140. dev_dbg(dev->dev, "pos:%d, len:%d, cnt:%d\n",
  1141. dev->pos, msgs->len, dev->cnt);
  1142. } while (dev->cnt > 0);
  1143. if (dev->cnt == 0) {
  1144. if (msgs->len == dev->pos) {
  1145. rem--;
  1146. msgs++;
  1147. dev->pos = 0;
  1148. }
  1149. if (rem) {
  1150. err = qup_i2c_poll_clock_ready(dev);
  1151. if (err < 0) {
  1152. ret = err;
  1153. goto out_err;
  1154. }
  1155. err = qup_update_state(dev, QUP_RESET_STATE);
  1156. if (err < 0) {
  1157. ret = err;
  1158. goto out_err;
  1159. }
  1160. }
  1161. }
  1162. /* Wait for I2C bus to be idle */
  1163. ret = qup_i2c_poll_writeready(dev, rem);
  1164. if (ret) {
  1165. dev_err(dev->dev,
  1166. "Error waiting for write ready\n");
  1167. goto out_err;
  1168. }
  1169. }
  1170. ret = num;
  1171. out_err:
  1172. disable_irq(dev->err_irq);
  1173. if (dev->num_irqs == 3) {
  1174. disable_irq(dev->in_irq);
  1175. disable_irq(dev->out_irq);
  1176. }
  1177. dev->complete = NULL;
  1178. dev->msg = NULL;
  1179. dev->pos = 0;
  1180. dev->err = 0;
  1181. dev->cnt = 0;
  1182. if (dev->pdata->clk_ctl_xfer)
  1183. i2c_qup_pm_suspend_clk(dev);
  1184. atomic_set(&dev->xfer_progress, 0);
  1185. mutex_unlock(&dev->mlock);
  1186. pm_runtime_mark_last_busy(dev->dev);
  1187. pm_runtime_put_autosuspend(dev->dev);
  1188. return ret;
  1189. }
  1190. enum msm_i2c_dt_entry_status {
  1191. DT_REQUIRED,
  1192. DT_SUGGESTED,
  1193. DT_OPTIONAL,
  1194. };
  1195. enum msm_i2c_dt_entry_type {
  1196. DT_U32,
  1197. DT_GPIO,
  1198. DT_BOOL,
  1199. };
  1200. struct msm_i2c_dt_to_pdata_map {
  1201. const char *dt_name;
  1202. void *ptr_data;
  1203. enum msm_i2c_dt_entry_status status;
  1204. enum msm_i2c_dt_entry_type type;
  1205. int default_val;
  1206. };
  1207. int __devinit msm_i2c_rsrcs_dt_to_pdata_map(struct platform_device *pdev,
  1208. struct msm_i2c_platform_data *pdata, int *gpios)
  1209. {
  1210. int ret, err = 0;
  1211. struct device_node *node = pdev->dev.of_node;
  1212. struct msm_i2c_dt_to_pdata_map *itr;
  1213. struct msm_i2c_dt_to_pdata_map map[] = {
  1214. {"qcom,i2c-bus-freq", &pdata->clk_freq, DT_REQUIRED, DT_U32, 0},
  1215. {"cell-index", &pdev->id, DT_REQUIRED, DT_U32, -1},
  1216. {"qcom,i2c-src-freq", &pdata->src_clk_rate, DT_SUGGESTED, DT_U32, 0},
  1217. {"qcom,master-id", &pdata->master_id, DT_SUGGESTED, DT_U32, 0},
  1218. {"qcom,scl-gpio", gpios, DT_OPTIONAL, DT_GPIO, -1},
  1219. {"qcom,sda-gpio", gpios + 1, DT_OPTIONAL, DT_GPIO, -1},
  1220. {"qcom,clk-ctl-xfer", &pdata->clk_ctl_xfer, DT_OPTIONAL, DT_BOOL, -1},
  1221. {"qcom,active-only", &pdata->active_only, DT_OPTIONAL, DT_BOOL, 0},
  1222. #if defined(CONFIG_MACH_KS01EUR) || defined(CONFIG_SEC_CHAGALL_PROJECT)
  1223. {"qcom,noise-rjct-scl", &pdata->noise_rjct_scl, DT_OPTIONAL, DT_U32, 0},
  1224. {"qcom,noise-rjct-sda", &pdata->noise_rjct_sda, DT_OPTIONAL, DT_U32, 0},
  1225. #endif
  1226. {NULL, NULL, 0, 0, 0},
  1227. };
  1228. for (itr = map; itr->dt_name ; ++itr) {
  1229. switch (itr->type) {
  1230. case DT_GPIO:
  1231. ret = of_get_named_gpio(node, itr->dt_name, 0);
  1232. if (ret >= 0) {
  1233. *((int *) itr->ptr_data) = ret;
  1234. ret = 0;
  1235. }
  1236. break;
  1237. case DT_U32:
  1238. ret = of_property_read_u32(node, itr->dt_name,
  1239. (u32 *) itr->ptr_data);
  1240. break;
  1241. case DT_BOOL:
  1242. *((bool *) itr->ptr_data) =
  1243. of_property_read_bool(node, itr->dt_name);
  1244. ret = 0;
  1245. break;
  1246. default:
  1247. dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
  1248. itr->type);
  1249. ret = -EBADE;
  1250. }
  1251. dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
  1252. ret, itr->dt_name, *((int *)itr->ptr_data));
  1253. if (ret) {
  1254. *((int *)itr->ptr_data) = itr->default_val;
  1255. if (itr->status < DT_OPTIONAL) {
  1256. dev_err(&pdev->dev, "Missing '%s' DT entry\n",
  1257. itr->dt_name);
  1258. /* cont on err to dump all missing entries */
  1259. if (itr->status == DT_REQUIRED && !err)
  1260. err = ret;
  1261. }
  1262. }
  1263. }
  1264. return err;
  1265. }
  1266. static u32
  1267. qup_i2c_func(struct i2c_adapter *adap)
  1268. {
  1269. return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
  1270. }
  1271. static const struct i2c_algorithm qup_i2c_algo = {
  1272. .master_xfer = qup_i2c_xfer,
  1273. .functionality = qup_i2c_func,
  1274. };
  1275. static int __devinit
  1276. qup_i2c_probe(struct platform_device *pdev)
  1277. {
  1278. struct qup_i2c_dev *dev;
  1279. struct resource *qup_mem, *gsbi_mem, *qup_io, *gsbi_io, *res;
  1280. struct resource *in_irq, *out_irq, *err_irq;
  1281. struct clk *clk, *pclk;
  1282. int ret = 0;
  1283. int i;
  1284. int dt_gpios[I2C_GPIOS_DT_CNT];
  1285. bool use_device_tree = pdev->dev.of_node;
  1286. struct msm_i2c_platform_data *pdata;
  1287. gsbi_mem = NULL;
  1288. dev_dbg(&pdev->dev, "qup_i2c_probe\n");
  1289. if (use_device_tree) {
  1290. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  1291. if (!pdata)
  1292. return -ENOMEM;
  1293. ret = msm_i2c_rsrcs_dt_to_pdata_map(pdev, pdata, dt_gpios);
  1294. if (ret)
  1295. goto get_res_failed;
  1296. } else
  1297. pdata = pdev->dev.platform_data;
  1298. if (!pdata) {
  1299. dev_err(&pdev->dev, "platform data not initialized\n");
  1300. return -ENOSYS;
  1301. }
  1302. qup_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1303. "qup_phys_addr");
  1304. if (!qup_mem) {
  1305. dev_err(&pdev->dev,
  1306. "platform_get_resource_byname(qup_phys_addr) failed\n");
  1307. ret = -ENODEV;
  1308. goto get_res_failed;
  1309. }
  1310. /*
  1311. * We only have 1 interrupt for new hardware targets and in_irq,
  1312. * out_irq will be NULL for those platforms
  1313. */
  1314. in_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1315. "qup_in_intr");
  1316. out_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1317. "qup_out_intr");
  1318. err_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1319. "qup_err_intr");
  1320. if (!err_irq) {
  1321. dev_err(&pdev->dev, "no error irq resource?\n");
  1322. ret = -ENODEV;
  1323. goto get_res_failed;
  1324. }
  1325. qup_io = request_mem_region(qup_mem->start, resource_size(qup_mem),
  1326. pdev->name);
  1327. if (!qup_io) {
  1328. dev_err(&pdev->dev, "QUP region already claimed\n");
  1329. ret = -EBUSY;
  1330. goto get_res_failed;
  1331. }
  1332. if (!pdata->use_gsbi_shared_mode) {
  1333. gsbi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1334. "gsbi_qup_i2c_addr");
  1335. if (!gsbi_mem) {
  1336. dev_dbg(&pdev->dev, "Assume BLSP\n");
  1337. /*
  1338. * BLSP core does not need protocol programming so this
  1339. * resource is not expected
  1340. */
  1341. goto blsp_core_init;
  1342. }
  1343. gsbi_io = request_mem_region(gsbi_mem->start,
  1344. resource_size(gsbi_mem),
  1345. pdev->name);
  1346. if (!gsbi_io) {
  1347. dev_err(&pdev->dev, "GSBI region already claimed\n");
  1348. ret = -EBUSY;
  1349. goto err_res_failed;
  1350. }
  1351. }
  1352. blsp_core_init:
  1353. clk = clk_get(&pdev->dev, "core_clk");
  1354. if (IS_ERR(clk)) {
  1355. dev_err(&pdev->dev, "Could not get core_clk\n");
  1356. ret = PTR_ERR(clk);
  1357. goto err_clk_get_failed;
  1358. }
  1359. pclk = clk_get(&pdev->dev, "iface_clk");
  1360. if (IS_ERR(pclk)) {
  1361. dev_err(&pdev->dev, "Could not get iface_clk\n");
  1362. ret = PTR_ERR(pclk);
  1363. clk_put(clk);
  1364. goto err_config_pckl_failed;
  1365. }
  1366. /* We support frequencies up to Fast mode (400 kHz) */
  1367. if (pdata->clk_freq <= 0 ||
  1368. pdata->clk_freq > 400000) {
  1369. dev_err(&pdev->dev, "clock frequency not supported\n");
  1370. ret = -EIO;
  1371. goto err_config_failed;
  1372. }
  1373. dev = kzalloc(sizeof(struct qup_i2c_dev), GFP_KERNEL);
  1374. if (!dev) {
  1375. ret = -ENOMEM;
  1376. goto err_alloc_dev_failed;
  1377. }
  1378. dev->dev = &pdev->dev;
  1379. if (in_irq)
  1380. dev->in_irq = in_irq->start;
  1381. if (out_irq)
  1382. dev->out_irq = out_irq->start;
  1383. dev->err_irq = err_irq->start;
  1384. if (in_irq && out_irq)
  1385. dev->num_irqs = 3;
  1386. else
  1387. dev->num_irqs = 1;
  1388. dev->clk = clk;
  1389. dev->pclk = pclk;
  1390. dev->base = ioremap(qup_mem->start, resource_size(qup_mem));
  1391. if (!dev->base) {
  1392. ret = -ENOMEM;
  1393. goto err_ioremap_failed;
  1394. }
  1395. /* Configure GSBI block to use I2C functionality */
  1396. if (gsbi_mem) {
  1397. dev->gsbi = ioremap(gsbi_mem->start, resource_size(gsbi_mem));
  1398. if (!dev->gsbi) {
  1399. ret = -ENOMEM;
  1400. goto err_gsbi_failed;
  1401. }
  1402. }
  1403. for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
  1404. if (use_device_tree && i < I2C_GPIOS_DT_CNT) {
  1405. dev->i2c_gpios[i] = dt_gpios[i];
  1406. } else {
  1407. res = platform_get_resource_byname(pdev, IORESOURCE_IO,
  1408. i2c_rsrcs[i]);
  1409. dev->i2c_gpios[i] = res ? res->start : -1;
  1410. }
  1411. }
  1412. platform_set_drvdata(pdev, dev);
  1413. dev->one_bit_t = (USEC_PER_SEC/pdata->clk_freq) + 1;
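/*
 * Example (illustrative, not an original comment): at 100 kHz, one_bit_t =
 * 1000000 / 100000 + 1 = 11 us; at 400 kHz it is 1000000 / 400000 + 1 = 3 us
 * (integer division). It is used as the polling-delay granularity in the
 * transfer path.
 */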
  1414. dev->pdata = pdata;
  1415. dev->clk_ctl = 0;
  1416. dev->pos = 0;
  1417. ret = i2c_qup_clk_path_init(pdev, dev);
  1418. if (ret) {
  1419. dev_err(&pdev->dev,
  1420. "Failed to init clock path-voting data structs. err:%d", ret);
  1421. /* disable i2c_qup_clk_path_xxx() functionality */
  1422. dev->pdata->master_id = 0;
  1423. }
  1424. if (dev->pdata->src_clk_rate <= 0) {
  1425. dev_info(&pdev->dev,
  1426. "No src_clk_rate specified in platfrom data\n");
  1427. dev_info(&pdev->dev, "Using default clock rate %dHz\n",
  1428. DEFAULT_CLK_RATE);
  1429. dev->pdata->src_clk_rate = DEFAULT_CLK_RATE;
  1430. }
  1431. ret = clk_set_rate(dev->clk, dev->pdata->src_clk_rate);
  1432. if (ret)
  1433. dev_info(&pdev->dev, "clk_set_rate(core_clk, %dHz):%d\n",
  1434. dev->pdata->src_clk_rate, ret);
  1435. clk_prepare_enable(dev->clk);
  1436. clk_prepare_enable(dev->pclk);
  1437. /*
  1438. * If bootloaders leave a pending interrupt on certain GSBI's,
  1439. * then we reset the core before registering for interrupts.
  1440. */
  1441. writel_relaxed(1, dev->base + QUP_SW_RESET);
  1442. if (qup_i2c_poll_state(dev, 0, true) != 0)
  1443. goto err_reset_failed;
  1444. clk_disable_unprepare(dev->clk);
  1445. clk_disable_unprepare(dev->pclk);
  1446. /*
  1447. * We use num_irqs to also indicate if we got 3 interrupts or just 1.
  1448. * If we have just 1, we use err_irq as the general purpose irq
  1449. * and handle the changes in ISR accordingly
  1450. * Per hardware guidelines, if we have 3 interrupts, they are always
  1451. * edge-triggered, and if we have 1, it's always level-triggered
  1452. */
        if (dev->num_irqs == 3) {
                ret = request_irq(dev->in_irq, qup_i2c_interrupt,
                                  IRQF_TRIGGER_RISING, "qup_in_intr", dev);
                if (ret) {
                        dev_err(&pdev->dev, "request_in_irq failed\n");
                        goto err_request_irq_failed;
                }
                /*
                 * We assume out_irq exists if in_irq does, since the platform
                 * configuration assigns either three interrupts to the QUP or
                 * one.
                 */
                ret = request_irq(dev->out_irq, qup_i2c_interrupt,
                                  IRQF_TRIGGER_RISING, "qup_out_intr", dev);
                if (ret) {
                        dev_err(&pdev->dev, "request_out_irq failed\n");
                        free_irq(dev->in_irq, dev);
                        goto err_request_irq_failed;
                }
                ret = request_irq(dev->err_irq, qup_i2c_interrupt,
                                  IRQF_TRIGGER_RISING, "qup_err_intr", dev);
                if (ret) {
                        dev_err(&pdev->dev, "request_err_irq failed\n");
                        free_irq(dev->out_irq, dev);
                        free_irq(dev->in_irq, dev);
                        goto err_request_irq_failed;
                }
        } else {
                ret = request_irq(dev->err_irq, qup_i2c_interrupt,
                                  IRQF_TRIGGER_HIGH, "qup_err_intr", dev);
                if (ret) {
                        dev_err(&pdev->dev, "request_err_irq failed\n");
                        goto err_request_irq_failed;
                }
        }
        disable_irq(dev->err_irq);
        if (dev->num_irqs == 3) {
                disable_irq(dev->in_irq);
                disable_irq(dev->out_irq);
        }
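
        /*
         * The IRQs stay disabled at this point; they are expected to be
         * re-enabled by the power-management/transfer path when a transaction
         * actually runs.
         */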
        i2c_set_adapdata(&dev->adapter, dev);
        dev->adapter.algo = &qup_i2c_algo;
        strlcpy(dev->adapter.name,
                "QUP I2C adapter",
                sizeof(dev->adapter.name));
        dev->adapter.nr = pdev->id;
        dev->adapter.dev.parent = &pdev->dev;

        if (pdata->msm_i2c_config_gpio)
                pdata->msm_i2c_config_gpio(dev->adapter.nr, 1);

        mutex_init(&dev->mlock);
        dev->pwr_state = MSM_I2C_PM_SUSPENDED;
        atomic_set(&dev->xfer_progress, 0);
        /*
         * If the same AHB clock is shared with the modem, switch it on here
         * and keep it on, instead of toggling it during suspend and resume.
         */
        if (dev->pdata->keep_ahb_clk_on)
                clk_prepare_enable(dev->pclk);
        ret = i2c_add_numbered_adapter(&dev->adapter);
        if (ret) {
                dev_err(&pdev->dev, "i2c_add_adapter failed\n");
                if (dev->num_irqs == 3) {
                        free_irq(dev->out_irq, dev);
                        free_irq(dev->in_irq, dev);
                }
                free_irq(dev->err_irq, dev);
        } else {
                if (dev->dev->of_node) {
                        dev->adapter.dev.of_node = pdev->dev.of_node;
                        of_i2c_register_devices(&dev->adapter);
                }
                pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
                pm_runtime_use_autosuspend(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
                return 0;
        }
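
        /*
         * Error unwinding: when i2c_add_numbered_adapter() fails we fall
         * through into the labels below.  Each label releases the resources
         * acquired before the corresponding failure point and falls through
         * to the next, so teardown happens in reverse order of acquisition.
         */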
err_request_irq_failed:
err_reset_failed:
        i2c_qup_clk_path_teardown(dev);
        if (dev->gsbi)
                iounmap(dev->gsbi);
err_gsbi_failed:
        iounmap(dev->base);
err_ioremap_failed:
        kfree(dev);
err_alloc_dev_failed:
err_config_failed:
        clk_put(clk);
        clk_put(pclk);
err_clk_get_failed:
        if (gsbi_mem)
                release_mem_region(gsbi_mem->start, resource_size(gsbi_mem));
err_res_failed:
        release_mem_region(qup_mem->start, resource_size(qup_mem));
get_res_failed:
        if (pdev->dev.of_node)
                kfree(pdata);
        return ret;
}
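
/* Release a named memory region that was requested during probe. */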
static void qup_i2c_mem_release(struct platform_device *pdev, const char *name)
{
        struct resource *res =
                platform_get_resource_byname(pdev, IORESOURCE_MEM, name);

        if (res)
                release_mem_region(res->start, resource_size(res));
        else
                dev_dbg(&pdev->dev,
                        "platform_get_resource_byname(%s) failed\n", name);
}
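
/*
 * Undo probe: quiesce the controller, then release the IRQs, adapter, clocks,
 * bus-voting data and register mappings in roughly the reverse order of
 * acquisition.
 */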
static int __devexit
qup_i2c_remove(struct platform_device *pdev)
{
        struct qup_i2c_dev *dev = platform_get_drvdata(pdev);

        /* Grab the mutex to ensure any ongoing transaction has finished */
        mutex_lock(&dev->mlock);
        dev->pwr_state = MSM_I2C_SYS_SUSPENDING;
        mutex_unlock(&dev->mlock);
        i2c_qup_pm_suspend(dev);
        dev->pwr_state = MSM_I2C_SYS_SUSPENDED;
        mutex_destroy(&dev->mlock);
        platform_set_drvdata(pdev, NULL);

        if (dev->num_irqs == 3) {
                free_irq(dev->out_irq, dev);
                free_irq(dev->in_irq, dev);
        }
        free_irq(dev->err_irq, dev);
        i2c_del_adapter(&dev->adapter);

        if (!dev->pdata->keep_ahb_clk_on)
                clk_put(dev->pclk);
        clk_put(dev->clk);

        if (dev->pdata->active_only)
                i2c_qup_clk_path_unvote(dev);
        i2c_qup_clk_path_teardown(dev);

        if (dev->gsbi)
                iounmap(dev->gsbi);
        iounmap(dev->base);

        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);

        if (!dev->pdata->use_gsbi_shared_mode)
                qup_i2c_mem_release(pdev, "gsbi_qup_i2c_addr");
        qup_i2c_mem_release(pdev, "qup_phys_addr");

        if (dev->dev->of_node)
                kfree(dev->pdata);
        kfree(dev);
        return 0;
}
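
/*
 * Power management: the runtime-PM callbacks suspend/resume the controller on
 * demand, while the system-sleep callbacks additionally serialize against an
 * in-flight transfer via mlock and resynchronize the runtime-PM status.
 */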
#ifdef CONFIG_PM
static int i2c_qup_pm_suspend_runtime(struct device *device)
{
        struct platform_device *pdev = to_platform_device(device);
        struct qup_i2c_dev *dev = platform_get_drvdata(pdev);

        dev_dbg(device, "pm_runtime: suspending...\n");
        i2c_qup_pm_suspend(dev);
        return 0;
}

static int i2c_qup_pm_resume_runtime(struct device *device)
{
        struct platform_device *pdev = to_platform_device(device);
        struct qup_i2c_dev *dev = platform_get_drvdata(pdev);

        dev_dbg(device, "pm_runtime: resuming...\n");
        i2c_qup_pm_resume(dev);
        return 0;
}

static int i2c_qup_pm_suspend_sys(struct device *device)
{
        struct platform_device *pdev = to_platform_device(device);
        struct qup_i2c_dev *dev = platform_get_drvdata(pdev);

        /* Acquire the mutex to ensure the current transaction has finished */
        mutex_lock(&dev->mlock);
        dev->pwr_state = MSM_I2C_SYS_SUSPENDING;
        mutex_unlock(&dev->mlock);
        if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
                dev_dbg(device, "system suspend\n");
                i2c_qup_pm_suspend(dev);
                /* Set the device's runtime-PM status to 'suspended' */
                pm_runtime_disable(device);
                pm_runtime_set_suspended(device);
                pm_runtime_enable(device);
        }
        dev->pwr_state = MSM_I2C_SYS_SUSPENDED;
        return 0;
}

static int i2c_qup_pm_resume_sys(struct device *device)
{
        struct platform_device *pdev = to_platform_device(device);
        struct qup_i2c_dev *dev = platform_get_drvdata(pdev);

        /*
         * Rely on runtime PM to call resume if it is enabled.  Even if it is
         * not, the first client transaction will switch the clocks on and
         * configure the GPIOs.
         */
        dev_dbg(device, "system resume\n");
        dev->pwr_state = MSM_I2C_PM_SUSPENDED;
        return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops i2c_qup_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(
                i2c_qup_pm_suspend_sys,
                i2c_qup_pm_resume_sys
        )
        SET_RUNTIME_PM_OPS(
                i2c_qup_pm_suspend_runtime,
                i2c_qup_pm_resume_runtime,
                NULL
        )
};
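
/*
 * Device-tree match table.  An illustrative (not platform-exact) node that
 * this would match:
 *
 *      i2c@... {
 *              compatible = "qcom,i2c-qup";
 *              ...
 *      };
 */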
static struct of_device_id i2c_qup_dt_match[] = {
        {
                .compatible = "qcom,i2c-qup",
        },
        {}
};

static struct platform_driver qup_i2c_driver = {
        .probe = qup_i2c_probe,
        .remove = __devexit_p(qup_i2c_remove),
        .driver = {
                .name = "qup_i2c",
                .owner = THIS_MODULE,
                .pm = &i2c_qup_dev_pm_ops,
                .of_match_table = i2c_qup_dt_match,
        },
};

/* QUP may be needed to bring up other drivers */
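/*
 * The static 'initialized' guard allows dependent drivers to call this
 * explicitly while the arch_initcall below may also invoke it, without
 * registering the platform driver twice.
 */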
int __init qup_i2c_init_driver(void)
{
        static bool initialized;

        if (initialized)
                return 0;
        initialized = true;

        return platform_driver_register(&qup_i2c_driver);
}
EXPORT_SYMBOL(qup_i2c_init_driver);
arch_initcall(qup_i2c_init_driver);

static void __exit qup_i2c_exit_driver(void)
{
        platform_driver_unregister(&qup_i2c_driver);
}
module_exit(qup_i2c_exit_driver);