main.c

/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);
static struct bus_type bcma_bus_type = {
	.name = "bcma",
	.match = bcma_bus_match,
	.probe = bcma_device_probe,
	.remove = bcma_device_remove,
	.uevent = bcma_device_uevent,
	.dev_groups = bcma_device_groups,
};
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);
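
/*
 * Poll a core register until the bits selected by @mask read back as @value.
 * @timeout is given in jiffies; returns true on a match, false (with a
 * warning) if the deadline passes first.
 */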
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}
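
/*
 * Look for the DT child node of the host device whose translated "reg"
 * address matches this core's base address.
 */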
static struct device_node *bcma_of_find_child_device(struct device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent->of_node)
		return NULL;

	for_each_child_of_node(parent->of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}

	return NULL;
}
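
/*
 * Resolve interrupt @num for a core: prefer the core's own DT node; if that
 * fails, fall back to the host node's interrupt map keyed by the core's
 * base address.
 */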
static int bcma_of_irq_parse(struct device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);

	of_dma_configure(&core->dev, node);
}
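
/*
 * Return the Linux IRQ number for a core. PCI hosts use the PCI device's
 * IRQ; SoC hosts map the MIPS hardware IRQ (for num == 0) or ask the device
 * tree; SDIO hosts have no IRQ support here.
 */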
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
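
/* Set up the core's embedded struct device and pick the device used for DMA. */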
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
	core->dev.parent = bcma_bus_get_host_dev(bus);
	if (core->dev.parent)
		bcma_of_fill_device(core->dev.parent, core);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
		} else {
			core->dev.dma_mask = &core->dev.coherent_dma_mask;
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}
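
/* Return the host's struct device (PCI, platform or SDIO), if present. */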
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		if (bus->host_pci)
			return &bus->host_pci->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SOC:
		if (bus->host_pdev)
			return &bus->host_pdev->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SDIO:
		if (bus->host_sdio)
			return &bus->host_sdio->dev;
		else
			return NULL;
	}
	return NULL;
}
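
/* Assign the next free bus number and detect the chip on this bus. */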
void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}
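
/*
 * Register the remaining cores with the driver core, skipping the ones bcma
 * handles internally, then register the optional flash, GPIO and watchdog
 * platform devices.
 */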
static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support these cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_PFLASH
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif

	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now no one uses the internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
}

int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;
	struct device *dev;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	dev = bcma_bus_get_host_dev(bus);
	if (dev) {
		of_platform_default_populate(dev->of_node, NULL, dev);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core (unit 0) */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core (unit 1) */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}

/*
 * This is a special version of the bus registration function, designed for
 * SoCs. It scans the bus and performs basic initialization of the main cores
 * only. Please note it requires memory allocation, but it won't try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
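
/* Match a core against a driver's id_table; BCMA_ANY_* entries act as wildcards. */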
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						 drv);
	int err = 0;

	get_device(dev);
	if (adrv->probe)
		err = adrv->probe(core);
	if (err)
		put_device(dev);

	return err;
}

static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						 drv);

	if (adrv->remove)
		adrv->remove(core);
	put_device(dev);

	return 0;
}
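
/* Emit a MODALIAS uevent so userspace can autoload the matching driver. */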
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

static unsigned int bcma_bus_registered;

/*
 * If built-in, the bus has to be registered early, before any driver calls
 * bcma_driver_register.
 * Otherwise registering a driver would trigger a BUG in driver_register.
 */
static int __init bcma_init_bus_register(void)
{
	int err;

	if (bcma_bus_registered)
		return 0;

	err = bus_register(&bcma_bus_type);
	if (!err)
		bcma_bus_registered = 1;

	return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);
#endif

/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
	int err;

	err = bcma_init_bus_register();
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
module_init(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)