bcmsdh_sdmmc.c

/*
 * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
 *
 * Copyright (C) 1999-2010, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: bcmsdh_sdmmc.c,v 1.1.2.5.6.29 2010/03/19 17:16:08 Exp $
 */
#include <typedefs.h>
#include <bcmdevs.h>
#include <bcmendian.h>
#include <bcmutils.h>
#include <osl.h>
#include <sdio.h>      /* SDIO Device and Protocol Specs */
#include <sdioh.h>     /* SDIO Host Controller Specification */
#include <bcmsdbus.h>  /* bcmsdh to/from specific controller APIs */
#include <sdiovar.h>   /* ioctl/iovars */

#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

#include <dngl_stats.h>
#include <dhd.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
extern volatile bool dhd_mmc_suspend;
#endif
#include "bcmsdh_sdmmc.h"

#ifndef BCMSDH_MODULE
extern int sdio_function_init(void);
extern void sdio_function_cleanup(void);
#endif /* BCMSDH_MODULE */

#if !defined(OOB_INTR_ONLY)
static void IRQHandler(struct sdio_func *func);
static void IRQHandlerF2(struct sdio_func *func);
#endif
static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
//extern int sdio_reset_comm(struct mmc_card *card);
extern PBCMSDH_SDMMC_INSTANCE gInstance;

uint sd_sdmode = SDIOH_MODE_SD4;  /* Use SD4 mode by default */
uint sd_f2_blocksize = 512;       /* Default blocksize */
uint sd_divisor = 2;              /* Default 48MHz/2 = 24MHz */
uint sd_power = 1;                /* Default to SD Slot powered ON */
uint sd_clock = 1;                /* Default to SD Clock turned ON */
uint sd_hiok = FALSE;             /* Don't use hi-speed mode by default */
uint sd_msglevel = 0x01;
uint sd_use_dma = TRUE;
DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
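
/* Packet buffers handed to the MMC stack are expected to be 32-bit aligned;
 * DMA_ALIGN_MASK is used below to test (address & 0x03) == 0 before a transfer.
 */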
#define DMA_ALIGN_MASK 0x03

int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);

static int
sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
{
    int err_ret;
    uint32 fbraddr;
    uint8 func;

    sd_trace(("%s\n", __FUNCTION__));

    /* Get the Card's common CIS address */
    sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
    sd->func_cis_ptr[0] = sd->com_cis_ptr;
    sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));

    /* Get the Card's function CIS (for each function) */
    for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
         func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
        sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
        sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
                 __FUNCTION__, func, sd->func_cis_ptr[func]));
    }

    sd->func_cis_ptr[0] = sd->com_cis_ptr;
    sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));

    /* Enable Function 1 */
    sdio_claim_host(gInstance->func[1]);
    err_ret = sdio_enable_func(gInstance->func[1]);
    sdio_release_host(gInstance->func[1]);
    if (err_ret) {
        sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
    }

    return FALSE;
}
/*
 * Public entry points & extern's
 */
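
/* Allocate and initialize the sdioh_info_t for this host: record the default
 * number of functions and block sizes, program the F1/F2 block sizes on the
 * card, and enable function 1 via sdioh_sdmmc_card_enablefuncs().
 */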
extern sdioh_info_t *
sdioh_attach(osl_t *osh, void *bar0, uint irq)
{
    sdioh_info_t *sd;
    int err_ret;

    sd_trace(("%s\n", __FUNCTION__));

    if (gInstance == NULL) {
        sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
        return NULL;
    }

    if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
        sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
        return NULL;
    }
    bzero((char *)sd, sizeof(sdioh_info_t));
    sd->osh = osh;

    if (sdioh_sdmmc_osinit(sd) != 0) {
        sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
        MFREE(sd->osh, sd, sizeof(sdioh_info_t));
        return NULL;
    }

    sd->num_funcs = 2;
    sd->sd_blockmode = TRUE;
    sd->use_client_ints = TRUE;
    sd->client_block_size[0] = 64;

    gInstance->sd = sd;

    /* Claim host controller */
    sdio_claim_host(gInstance->func[1]);

    sd->client_block_size[1] = 64;
    err_ret = sdio_set_block_size(gInstance->func[1], 64);
    if (err_ret) {
        sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
    }

    /* Release host controller F1 */
    sdio_release_host(gInstance->func[1]);

    if (gInstance->func[2]) {
        /* Claim host controller F2 */
        sdio_claim_host(gInstance->func[2]);

        sd->client_block_size[2] = sd_f2_blocksize;
        err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
        if (err_ret) {
            sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
                    sd_f2_blocksize));
        }

        /* Release host controller F2 */
        sdio_release_host(gInstance->func[2]);
    }

    sdioh_sdmmc_card_enablefuncs(sd);

    sd_trace(("%s: Done\n", __FUNCTION__));
    return sd;
}
extern SDIOH_API_RC
sdioh_detach(osl_t *osh, sdioh_info_t *sd)
{
    sd_trace(("%s\n", __FUNCTION__));
    if (sd) {
        /* Disable Function 2 */
        sdio_claim_host(gInstance->func[2]);
        sdio_disable_func(gInstance->func[2]);
        sdio_release_host(gInstance->func[2]);

        /* Disable Function 1 */
        sdio_claim_host(gInstance->func[1]);
        sdio_disable_func(gInstance->func[1]);
        sdio_release_host(gInstance->func[1]);

        /* deregister irq */
        sdioh_sdmmc_osfree(sd);

        MFREE(sd->osh, sd, sizeof(sdioh_info_t));
    }
    return SDIOH_API_RC_SUCCESS;
}
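
/* With hardware out-of-band interrupts (OOB_INTR_ONLY && HW_OOB), the in-band
 * SDIO interrupt enables still need to be programmed: these helpers set or
 * clear the F1/F2 enable bits and the master enable in the CCCR interrupt
 * enable register through function 0.
 */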
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
extern SDIOH_API_RC
sdioh_enable_func_intr(void)
{
    uint8 reg;
    int err;

    if (gInstance->func[0]) {
        sdio_claim_host(gInstance->func[0]);

        reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
        if (err) {
            sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
            sdio_release_host(gInstance->func[0]);
            return SDIOH_API_RC_FAIL;
        }

        /* Enable F1 and F2 interrupts, set master enable */
        reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);

        sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
        sdio_release_host(gInstance->func[0]);

        if (err) {
            sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
            return SDIOH_API_RC_FAIL;
        }
    }

    return SDIOH_API_RC_SUCCESS;
}

extern SDIOH_API_RC
sdioh_disable_func_intr(void)
{
    uint8 reg;
    int err;

    if (gInstance->func[0]) {
        sdio_claim_host(gInstance->func[0]);
        reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
        if (err) {
            sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
            sdio_release_host(gInstance->func[0]);
            return SDIOH_API_RC_FAIL;
        }

        reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
        /* Disable master interrupt with the last function interrupt */
        if (!(reg & 0xFE))
            reg = 0;
        sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);

        sdio_release_host(gInstance->func[0]);
        if (err) {
            sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
            return SDIOH_API_RC_FAIL;
        }
    }
    return SDIOH_API_RC_SUCCESS;
}
#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
/* Configure callback to client when we receive client interrupt */
extern SDIOH_API_RC
sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
{
    sd_trace(("%s: Entering\n", __FUNCTION__));
    if (fn == NULL) {
        sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
        return SDIOH_API_RC_FAIL;
    }
#if !defined(OOB_INTR_ONLY)
    sd->intr_handler = fn;
    sd->intr_handler_arg = argh;
    sd->intr_handler_valid = TRUE;

    /* register and unmask irq */
    if (gInstance->func[2]) {
        sdio_claim_host(gInstance->func[2]);
        sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
        sdio_release_host(gInstance->func[2]);
    }

    if (gInstance->func[1]) {
        sdio_claim_host(gInstance->func[1]);
        sdio_claim_irq(gInstance->func[1], IRQHandler);
        sdio_release_host(gInstance->func[1]);
    }
#elif defined(HW_OOB)
    sdioh_enable_func_intr();
#endif /* !defined(OOB_INTR_ONLY) */
    return SDIOH_API_RC_SUCCESS;
}
extern SDIOH_API_RC
sdioh_interrupt_deregister(sdioh_info_t *sd)
{
    sd_trace(("%s: Entering\n", __FUNCTION__));
#if !defined(OOB_INTR_ONLY)
    if (gInstance->func[1]) {
        /* Release the F1 irq */
        sdio_claim_host(gInstance->func[1]);
        sdio_release_irq(gInstance->func[1]);
        sdio_release_host(gInstance->func[1]);
    }

    if (gInstance->func[2]) {
        /* Claim host controller F2 */
        sdio_claim_host(gInstance->func[2]);
        sdio_release_irq(gInstance->func[2]);
        /* Release host controller F2 */
        sdio_release_host(gInstance->func[2]);
    }

    sd->intr_handler_valid = FALSE;
    sd->intr_handler = NULL;
    sd->intr_handler_arg = NULL;
#elif defined(HW_OOB)
    sdioh_disable_func_intr();
#endif /* !defined(OOB_INTR_ONLY) */
    return SDIOH_API_RC_SUCCESS;
}
extern SDIOH_API_RC
sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
{
    sd_trace(("%s: Entering\n", __FUNCTION__));
    *onoff = sd->client_intr_enabled;
    return SDIOH_API_RC_SUCCESS;
}

#if defined(DHD_DEBUG)
extern bool
sdioh_interrupt_pending(sdioh_info_t *sd)
{
    return (0);
}
#endif

uint
sdioh_query_iofnum(sdioh_info_t *sd)
{
    return sd->num_funcs;
}
/* IOVar table */
enum {
    IOV_MSGLEVEL = 1,
    IOV_BLOCKMODE,
    IOV_BLOCKSIZE,
    IOV_DMA,
    IOV_USEINTS,
    IOV_NUMINTS,
    IOV_NUMLOCALINTS,
    IOV_HOSTREG,
    IOV_DEVREG,
    IOV_DIVISOR,
    IOV_SDMODE,
    IOV_HISPEED,
    IOV_HCIREGS,
    IOV_POWER,
    IOV_CLOCK,
    IOV_RXCHAIN
};

const bcm_iovar_t sdioh_iovars[] = {
    {"sd_msglevel",     IOV_MSGLEVEL,     0, IOVT_UINT32, 0 },
    {"sd_blockmode",    IOV_BLOCKMODE,    0, IOVT_BOOL,   0 },
    {"sd_blocksize",    IOV_BLOCKSIZE,    0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
    {"sd_dma",          IOV_DMA,          0, IOVT_BOOL,   0 },
    {"sd_ints",         IOV_USEINTS,      0, IOVT_BOOL,   0 },
    {"sd_numints",      IOV_NUMINTS,      0, IOVT_UINT32, 0 },
    {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
    {"sd_hostreg",      IOV_HOSTREG,      0, IOVT_BUFFER, sizeof(sdreg_t) },
    {"sd_devreg",       IOV_DEVREG,       0, IOVT_BUFFER, sizeof(sdreg_t) },
    {"sd_divisor",      IOV_DIVISOR,      0, IOVT_UINT32, 0 },
    {"sd_power",        IOV_POWER,        0, IOVT_UINT32, 0 },
    {"sd_clock",        IOV_CLOCK,        0, IOVT_UINT32, 0 },
    {"sd_mode",         IOV_SDMODE,       0, IOVT_UINT32, 100},
    {"sd_highspeed",    IOV_HISPEED,      0, IOVT_UINT32, 0 },
    {"sd_rxchain",      IOV_RXCHAIN,      0, IOVT_BOOL,   0 },
    {NULL, 0, 0, 0, 0 }
};
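
/* Get/set handler for the iovars above.  The action id encodes both the
 * variable and the direction (IOV_GVAL/IOV_SVAL); integer values are passed
 * through int_val, while the buffer iovars (sd_hostreg/sd_devreg) use an
 * sdreg_t carried in params.
 */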
int
sdioh_iovar_op(sdioh_info_t *si, const char *name,
               void *params, int plen, void *arg, int len, bool set)
{
    const bcm_iovar_t *vi = NULL;
    int bcmerror = 0;
    int val_size;
    int32 int_val = 0;
    bool bool_val;
    uint32 actionid;

    ASSERT(name);
    ASSERT(len >= 0);

    /* Get must have return space; Set does not take qualifiers */
    ASSERT(set || (arg && len));
    ASSERT(!set || (!params && !plen));

    sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));

    if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
        bcmerror = BCME_UNSUPPORTED;
        goto exit;
    }

    if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
        goto exit;

    /* Set up params so get and set can share the convenience variables */
    if (params == NULL) {
        params = arg;
        plen = len;
    }

    if (vi->type == IOVT_VOID)
        val_size = 0;
    else if (vi->type == IOVT_BUFFER)
        val_size = len;
    else
        val_size = sizeof(int);

    if (plen >= (int)sizeof(int_val))
        bcopy(params, &int_val, sizeof(int_val));

    bool_val = (int_val != 0) ? TRUE : FALSE;

    actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
    switch (actionid) {
    case IOV_GVAL(IOV_MSGLEVEL):
        int_val = (int32)sd_msglevel;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_MSGLEVEL):
        sd_msglevel = int_val;
        break;

    case IOV_GVAL(IOV_BLOCKMODE):
        int_val = (int32)si->sd_blockmode;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_BLOCKMODE):
        si->sd_blockmode = (bool)int_val;
        /* Haven't figured out how to make non-block mode with DMA */
        break;

    case IOV_GVAL(IOV_BLOCKSIZE):
        if ((uint32)int_val > si->num_funcs) {
            bcmerror = BCME_BADARG;
            break;
        }
        int_val = (int32)si->client_block_size[int_val];
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_BLOCKSIZE):
    {
        uint func = ((uint32)int_val >> 16);
        uint blksize = (uint16)int_val;
        uint maxsize;

        if (func > si->num_funcs) {
            bcmerror = BCME_BADARG;
            break;
        }

        switch (func) {
        case 0: maxsize = 32; break;
        case 1: maxsize = BLOCK_SIZE_4318; break;
        case 2: maxsize = BLOCK_SIZE_4328; break;
        default: maxsize = 0;
        }
        if (blksize > maxsize) {
            bcmerror = BCME_BADARG;
            break;
        }
        if (!blksize) {
            blksize = maxsize;
        }

        /* Now set it */
        si->client_block_size[func] = blksize;

        break;
    }

    case IOV_GVAL(IOV_RXCHAIN):
        int_val = FALSE;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_GVAL(IOV_DMA):
        int_val = (int32)si->sd_use_dma;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_DMA):
        si->sd_use_dma = (bool)int_val;
        break;

    case IOV_GVAL(IOV_USEINTS):
        int_val = (int32)si->use_client_ints;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_USEINTS):
        si->use_client_ints = (bool)int_val;
        if (si->use_client_ints)
            si->intmask |= CLIENT_INTR;
        else
            si->intmask &= ~CLIENT_INTR;
        break;

    case IOV_GVAL(IOV_DIVISOR):
        int_val = (uint32)sd_divisor;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_DIVISOR):
        sd_divisor = int_val;
        break;

    case IOV_GVAL(IOV_POWER):
        int_val = (uint32)sd_power;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_POWER):
        sd_power = int_val;
        break;

    case IOV_GVAL(IOV_CLOCK):
        int_val = (uint32)sd_clock;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_CLOCK):
        sd_clock = int_val;
        break;

    case IOV_GVAL(IOV_SDMODE):
        int_val = (uint32)sd_sdmode;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_SDMODE):
        sd_sdmode = int_val;
        break;

    case IOV_GVAL(IOV_HISPEED):
        int_val = (uint32)sd_hiok;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_SVAL(IOV_HISPEED):
        sd_hiok = int_val;
        break;

    case IOV_GVAL(IOV_NUMINTS):
        int_val = (int32)si->intrcount;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_GVAL(IOV_NUMLOCALINTS):
        int_val = (int32)0;
        bcopy(&int_val, arg, val_size);
        break;

    case IOV_GVAL(IOV_HOSTREG):
    {
        sdreg_t *sd_ptr = (sdreg_t *)params;

        if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
            sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
            bcmerror = BCME_BADARG;
            break;
        }

        sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
                  sd_ptr->offset));
        if (sd_ptr->offset & 1)
            int_val = 8;  /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
        else if (sd_ptr->offset & 2)
            int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
        else
            int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */

        bcopy(&int_val, arg, sizeof(int_val));
        break;
    }

    case IOV_SVAL(IOV_HOSTREG):
    {
        sdreg_t *sd_ptr = (sdreg_t *)params;

        if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
            sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
            bcmerror = BCME_BADARG;
            break;
        }

        sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
                  sd_ptr->offset));
        break;
    }

    case IOV_GVAL(IOV_DEVREG):
    {
        sdreg_t *sd_ptr = (sdreg_t *)params;
        uint8 data = 0;

        if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
            bcmerror = BCME_SDIO_ERROR;
            break;
        }

        int_val = (int)data;
        bcopy(&int_val, arg, sizeof(int_val));
        break;
    }

    case IOV_SVAL(IOV_DEVREG):
    {
        sdreg_t *sd_ptr = (sdreg_t *)params;
        uint8 data = (uint8)sd_ptr->value;

        if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
            bcmerror = BCME_SDIO_ERROR;
            break;
        }
        break;
    }

    default:
        bcmerror = BCME_UNSUPPORTED;
        break;
    }
exit:
    return bcmerror;
}
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
SDIOH_API_RC
sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
{
    SDIOH_API_RC status;
    uint8 data;

    if (enable)
        data = 3;  /* enable hw oob interrupt */
    else
        data = 4;  /* disable hw oob interrupt */

    status = sdioh_request_byte(sd, SDIOH_WRITE, 0, 0xf2, &data);
    return status;
}
#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
extern SDIOH_API_RC
sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
    SDIOH_API_RC status;
    /* No lock needed since sdioh_request_byte does locking */
    status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
    return status;
}

extern SDIOH_API_RC
sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
    /* No lock needed since sdioh_request_byte does locking */
    SDIOH_API_RC status;
    status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
    return status;
}
static int
sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
{
    /* read 24 bits and return valid 17 bit addr */
    int i;
    uint32 scratch, regdata;
    uint8 *ptr = (uint8 *)&scratch;

    for (i = 0; i < 3; i++) {
        if ((sdioh_sdmmc_card_regread(sd, 0, regaddr, 1, &regdata)) != SUCCESS)
            sd_err(("%s: Can't read!\n", __FUNCTION__));

        *ptr++ = (uint8)regdata;
        regaddr++;
    }

    /* Only the lower 17 bits are valid */
    scratch = ltoh32(scratch);
    scratch &= 0x0001FFFF;
    return (scratch);
}
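
/* Copy 'length' bytes of the CIS for the given function into the caller's
 * buffer, one byte at a time through the function-0 register space.
 */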
extern SDIOH_API_RC
sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
{
    uint32 count;
    int offset;
    uint32 foo;
    uint8 *cis = cisd;

    sd_trace(("%s: Func = %d\n", __FUNCTION__, func));

    if (!sd->func_cis_ptr[func]) {
        bzero(cis, length);
        sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
        return SDIOH_API_RC_FAIL;
    }

    sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));

    for (count = 0; count < length; count++) {
        offset = sd->func_cis_ptr[func] + count;
        if (sdioh_sdmmc_card_regread(sd, 0, offset, 1, &foo) < 0) {
            sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
            return SDIOH_API_RC_FAIL;
        }

        *cis = (uint8)(foo & 0xff);
        cis++;
    }

    return SDIOH_API_RC_SUCCESS;
}
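
/* Single-byte (CMD52) register access.  Writes to function 0 are restricted:
 * the F2 enable bit in IOEN is handled by calling sdio_enable_func()/
 * sdio_disable_func(), the I/O abort register is written directly when
 * MMC_SDIO_ABORT is defined, addresses below 0xF0 are rejected, and the
 * vendor area (0xF0-0xFF) goes through sdio_f0_writeb().
 */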
extern SDIOH_API_RC
sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
{
    int err_ret = 0;  /* initialized so paths that issue no transfer (e.g. disallowed
                       * F0 writes) do not report a stale error */

    sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));

    DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
    DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
    if (rw) { /* CMD52 Write */
        if (func == 0) {
            /* Can only directly write to some F0 registers.  Handle
             * F2 enable as a special case.
             */
            if (regaddr == SDIOD_CCCR_IOEN) {
                if (gInstance->func[2]) {
                    sdio_claim_host(gInstance->func[2]);
                    if (*byte & SDIO_FUNC_ENABLE_2) {
                        /* Enable Function 2 */
                        err_ret = sdio_enable_func(gInstance->func[2]);
                        if (err_ret) {
                            sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
                                    err_ret));
                        }
                    } else {
                        /* Disable Function 2 */
                        err_ret = sdio_disable_func(gInstance->func[2]);
                        if (err_ret) {
                            sd_err(("bcmsdh_sdmmc: Disable F2 failed:%d",
                                    err_ret));
                        }
                    }
                    sdio_release_host(gInstance->func[2]);
                }
            }
#if defined(MMC_SDIO_ABORT)
            /* to allow abort command through F1 */
            else if (regaddr == SDIOD_CCCR_IOABORT) {
                sdio_claim_host(gInstance->func[func]);
                /*
                 * this sdio_writeb() can be replaced with another api
                 * depending upon MMC driver change.
                 * As of this time, this is a temporary one.
                 */
                sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
                sdio_release_host(gInstance->func[func]);
            }
#endif /* MMC_SDIO_ABORT */
            else if (regaddr < 0xF0) {
                sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
            } else {
                /* Claim host controller, perform F0 write, and release */
                sdio_claim_host(gInstance->func[func]);
                sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
                sdio_release_host(gInstance->func[func]);
            }
        } else {
            /* Claim host controller, perform Fn write, and release */
            sdio_claim_host(gInstance->func[func]);
            sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
            sdio_release_host(gInstance->func[func]);
        }
    } else { /* CMD52 Read */
        /* Claim host controller, perform Fn read, and release */
        sdio_claim_host(gInstance->func[func]);
        if (func == 0) {
            *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
        } else {
            *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
        }
        sdio_release_host(gInstance->func[func]);
    }

    if (err_ret) {
        sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
                rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
    }

    return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
extern SDIOH_API_RC
sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
                   uint32 *word, uint nbytes)
{
    int err_ret = SDIOH_API_RC_FAIL;

    if (func == 0) {
        sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
        return SDIOH_API_RC_FAIL;
    }

    sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
             __FUNCTION__, cmd_type, rw, func, addr, nbytes));

    DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
    DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
    /* Claim host controller */
    sdio_claim_host(gInstance->func[func]);

    if (rw) { /* Write */
        if (nbytes == 4) {
            sdio_writel(gInstance->func[func], *word, addr, &err_ret);
        } else if (nbytes == 2) {
            sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
        } else {
            sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
        }
    } else { /* Read */
        if (nbytes == 4) {
            *word = sdio_readl(gInstance->func[func], addr, &err_ret);
        } else if (nbytes == 2) {
            *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
        } else {
            sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
        }
    }

    /* Release host controller */
    sdio_release_host(gInstance->func[func]);

    if (err_ret) {
        sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
                rw ? "Write" : "Read", err_ret));
    }

    return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
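
/* Transfer one packet, or a chain of packets, to or from the card.  Each
 * packet's length is rounded up to a multiple of 4 bytes, the transfer is
 * issued with sdio_memcpy_toio/fromio (or sdio_readsb for FIFO reads), and the
 * target address is advanced between packets unless this is a fixed-address
 * (FIFO) transfer.
 */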
static SDIOH_API_RC
sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
                     uint addr, void *pkt)
{
    bool fifo = (fix_inc == SDIOH_DATA_FIX);
    uint32 SGCount = 0;
    int err_ret = 0;
    void *pnext;

    sd_trace(("%s: Enter\n", __FUNCTION__));

    ASSERT(pkt);
    DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
    DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);

    /* Claim host controller */
    sdio_claim_host(gInstance->func[func]);
    for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
        uint pkt_len = PKTLEN(sd->osh, pnext);
        pkt_len += 3;
        pkt_len &= 0xFFFFFFFC;

#ifdef CONFIG_MMC_MSM7X00A
        if ((pkt_len % 64) == 32) {
            sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
            pkt_len += 32;
        }
#endif /* CONFIG_MMC_MSM7X00A */
        /* Make sure the packet is aligned properly.  If it isn't, then this
         * is the fault of sdioh_request_buffer() which is supposed to give
         * us something we can work with.
         */
        ASSERT(((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) == 0);
        if ((write) && (!fifo)) {
            err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
                                       ((uint8 *)PKTDATA(sd->osh, pnext)),
                                       pkt_len);
        } else if (write) {
            err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
                                       ((uint8 *)PKTDATA(sd->osh, pnext)),
                                       pkt_len);
        } else if (fifo) {
            err_ret = sdio_readsb(gInstance->func[func],
                                  ((uint8 *)PKTDATA(sd->osh, pnext)),
                                  addr,
                                  pkt_len);
        } else {
            err_ret = sdio_memcpy_fromio(gInstance->func[func],
                                         ((uint8 *)PKTDATA(sd->osh, pnext)),
                                         addr,
                                         pkt_len);
        }

        if (err_ret) {
            sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
                    __FUNCTION__,
                    (write) ? "TX" : "RX",
                    pnext, SGCount, addr, pkt_len, err_ret));
        } else {
            sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
                      __FUNCTION__,
                      (write) ? "TX" : "RX",
                      pnext, SGCount, addr, pkt_len));
        }

        if (!fifo) {
            addr += pkt_len;
        }
        SGCount++;
    }

    /* Release host controller */
    sdio_release_host(gInstance->func[func]);
    sd_trace(("%s: Exit\n", __FUNCTION__));
    return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
/*
 * This function takes a buffer or packet, and fixes everything up so that in the
 * end, a DMA-able packet is created.
 *
 * A buffer does not have an associated packet pointer, and may or may not be aligned.
 * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
 * then all the packets in the chain must be properly aligned.  If the packet data is
 * not aligned, then there may only be one packet, and in this case, it is copied to a
 * new aligned packet.
 */
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
{
    SDIOH_API_RC Status;
    void *mypkt = NULL;

    sd_trace(("%s: Enter\n", __FUNCTION__));

    DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
    DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
    /* Case 1: we don't have a packet. */
    if (pkt == NULL) {
        sd_data(("%s: Creating new %s Packet, len=%d\n",
                 __FUNCTION__, write ? "TX" : "RX", buflen_u));
#ifdef DHD_USE_STATIC_BUF
        if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
#else
        if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
#endif /* DHD_USE_STATIC_BUF */
            sd_err(("%s: PKTGET failed: len %d\n",
                    __FUNCTION__, buflen_u));
            return SDIOH_API_RC_FAIL;
        }

        /* For a write, copy the buffer data into the packet. */
        if (write) {
            bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
        }

        Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);

        /* For a read, copy the packet data back to the buffer. */
        if (!write) {
            bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
        }
#ifdef DHD_USE_STATIC_BUF
        PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
#else
        PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
#endif /* DHD_USE_STATIC_BUF */
    } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
        /* Case 2: We have a packet, but it is unaligned.
         * In this case, we cannot have a chain.
         */
        ASSERT(PKTNEXT(sd->osh, pkt) == NULL);

        sd_data(("%s: Creating aligned %s Packet, len=%d\n",
                 __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
#ifdef DHD_USE_STATIC_BUF
        if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
#else
        if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
#endif /* DHD_USE_STATIC_BUF */
            sd_err(("%s: PKTGET failed: len %d\n",
                    __FUNCTION__, PKTLEN(sd->osh, pkt)));
            return SDIOH_API_RC_FAIL;
        }

        /* For a write, copy the buffer data into the packet. */
        if (write) {
            bcopy(PKTDATA(sd->osh, pkt),
                  PKTDATA(sd->osh, mypkt),
                  PKTLEN(sd->osh, pkt));
        }

        Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);

        /* For a read, copy the packet data back to the buffer. */
        if (!write) {
            bcopy(PKTDATA(sd->osh, mypkt),
                  PKTDATA(sd->osh, pkt),
                  PKTLEN(sd->osh, mypkt));
        }
#ifdef DHD_USE_STATIC_BUF
        PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
#else
        PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
#endif /* DHD_USE_STATIC_BUF */
    } else { /* Case 3: We have a packet and it is aligned. */
        sd_data(("%s: Aligned %s Packet, direct DMA\n",
                 __FUNCTION__, write ? "Tx" : "Rx"));
        Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
    }

    return (Status);
}
extern int
sdioh_abort(sdioh_info_t *sd, uint func)
{
    sd_trace(("%s: Enter\n", __FUNCTION__));
#if defined(MMC_SDIO_ABORT)
    /* issue abort cmd52 command through F1 */
    sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, (uint8 *)&func);
#endif /* defined(MMC_SDIO_ABORT) */
    sd_trace(("%s: Exit\n", __FUNCTION__));
    return SDIOH_API_RC_SUCCESS;
}

/* Reset and re-initialize the device */
int sdioh_sdio_reset(sdioh_info_t *si)
{
    sd_trace(("%s: Enter\n", __FUNCTION__));
    sd_trace(("%s: Exit\n", __FUNCTION__));
    return SDIOH_API_RC_SUCCESS;
}
/* Disable device interrupt */
void
sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
{
    sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
    sd->intmask &= ~CLIENT_INTR;
}

/* Enable device interrupt */
void
sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
{
    sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
    sd->intmask |= CLIENT_INTR;
}

/* Read client card reg */
int
sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
{
    if ((func == 0) || (regsize == 1)) {
        uint8 temp = 0;

        sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
        *data = temp;
        *data &= 0xff;
        sd_data(("%s: byte read data=0x%02x\n",
                 __FUNCTION__, *data));
    } else {
        sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
        if (regsize == 2)
            *data &= 0xffff;
        sd_data(("%s: word read data=0x%08x\n",
                 __FUNCTION__, *data));
    }

    return SUCCESS;
}

#if !defined(OOB_INTR_ONLY)
/* bcmsdh_sdmmc interrupt handler */
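/* The MMC core invokes this handler with the host already claimed; it is
 * released here around the call into the client's handler (which may perform
 * its own SDIO accesses) and re-claimed before returning to the core.
 */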
static void IRQHandler(struct sdio_func *func)
{
    sdioh_info_t *sd;

    sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
    sd = gInstance->sd;

    ASSERT(sd != NULL);

    sdio_release_host(gInstance->func[0]);

    if (sd->use_client_ints) {
        sd->intrcount++;
        ASSERT(sd->intr_handler);
        ASSERT(sd->intr_handler_arg);
        (sd->intr_handler)(sd->intr_handler_arg);
    } else {
        sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
        sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
                __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
    }

    sdio_claim_host(gInstance->func[0]);
}

/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
static void IRQHandlerF2(struct sdio_func *func)
{
    sdioh_info_t *sd;

    sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));

    sd = gInstance->sd;

    ASSERT(sd != NULL);
}
#endif /* !defined(OOB_INTR_ONLY) */
#ifdef NOTUSED
/* Write client card reg */
static int
sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
{
    if ((func == 0) || (regsize == 1)) {
        uint8 temp;

        temp = data & 0xff;
        sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
        sd_data(("%s: byte write data=0x%02x\n",
                 __FUNCTION__, data));
    } else {
        if (regsize == 2)
            data &= 0xffff;

        sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
        sd_data(("%s: word write data=0x%08x\n",
                 __FUNCTION__, data));
    }

    return SUCCESS;
}
#endif /* NOTUSED */

extern void sdio_reinit(void);
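
/* Bring the interface back up after the chip has been powered off.
 * Stage 0: re-initialize the SDIO stack, restore the block sizes and re-enable
 * the functions so firmware download can proceed over polled access.
 * Stage 1: once download is complete, claim the function interrupts (or enable
 * the out-of-band interrupt when OOB_INTR_ONLY is defined).
 */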
int
sdioh_start(sdioh_info_t *si, int stage)
{
    //int ret;
    sdioh_info_t *sd = gInstance->sd;

    /* This is done in stages because the interrupt cannot be enabled until
     * downloading of the firmware is complete; otherwise polled sdio access
     * would get in the way.
     */
    if (gInstance->func[0]) {
        if (stage == 0) {
            /* Since the power to the chip is killed, we have to re-enumerate
             * the device.  Set the block sizes and enable function 1 in
             * preparation for downloading the code.
             */
            /* sdio_reset_comm() has been fixed in the latest kernel/msm.git for
             * Linux 2.6.27.  The implementation prior to that is buggy and needs
             * Broadcom's patch for it.
             */
            // if ((ret = sdio_reset_comm(gInstance->func[0]->card)))
            //     sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
            // else
#ifndef ENABLE_DEEP_SLEEP
            sdio_reinit();
#endif
            {
                sd->num_funcs = 2;
                sd->sd_blockmode = TRUE;
                sd->use_client_ints = TRUE;
                sd->client_block_size[0] = 64;

                /* Claim host controller */
                sdio_claim_host(gInstance->func[1]);

                sd->client_block_size[1] = 64;
                if (sdio_set_block_size(gInstance->func[1], 64)) {
                    sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
                }

                /* Release host controller F1 */
                sdio_release_host(gInstance->func[1]);

                if (gInstance->func[2]) {
                    /* Claim host controller F2 */
                    sdio_claim_host(gInstance->func[2]);

                    sd->client_block_size[2] = sd_f2_blocksize;
                    if (sdio_set_block_size(gInstance->func[2],
                                            sd_f2_blocksize)) {
                        sd_err(("bcmsdh_sdmmc: Failed to set F2 "
                                "blocksize to %d\n", sd_f2_blocksize));
                    }

                    /* Release host controller F2 */
                    sdio_release_host(gInstance->func[2]);
                }

                sdioh_sdmmc_card_enablefuncs(sd);
            }
        } else {
#if !defined(OOB_INTR_ONLY)
            sdio_claim_host(gInstance->func[0]);
            sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
            sdio_claim_irq(gInstance->func[1], IRQHandler);
            sdio_release_host(gInstance->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
            sdioh_enable_func_intr();
#endif
            bcmsdh_oob_intr_set(TRUE);
#endif /* !defined(OOB_INTR_ONLY) */
        }
    } else
        sd_err(("%s Failed\n", __FUNCTION__));

    return (0);
}
int
sdioh_stop(sdioh_info_t *si)
{
    /* The MSM7201A Android sdio stack has a bug with interrupts: internally the
     * SDIO stack polls, which causes a problem when the device is turned off.
     * So unregister the interrupt with the SDIO stack to stop the polling.
     */
    if (gInstance->func[0]) {
#if !defined(OOB_INTR_ONLY)
        sdio_claim_host(gInstance->func[0]);
        sdio_release_irq(gInstance->func[1]);
        sdio_release_irq(gInstance->func[2]);
        sdio_release_host(gInstance->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
        sdioh_disable_func_intr();
#endif
        bcmsdh_oob_intr_set(FALSE);
#endif /* !defined(OOB_INTR_ONLY) */
    } else
        sd_err(("%s Failed\n", __FUNCTION__));

    return (0);
}