/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm-generic/pci_iomap.h>

#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */
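/*
 * Usage sketch (illustrative; the "regs" pointer and the register offsets
 * are hypothetical and assumed to come from ioremap() of a device register
 * block).  The raw accessors do no byte swapping and imply no barriers:
 *
 *	u32 id = __raw_readl(regs + 0x00);
 *	__raw_writel(id, regs + 0x04);
 */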
#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */
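/*
 * Usage sketch (illustrative; "regs" is a hypothetical ioremap()ed register
 * block, and CTRL_REG/CTRL_ENABLE are placeholder names).  readl()/writel()
 * treat the device register as little endian, so the CPU always sees the
 * value in native byte order:
 *
 *	u32 ctrl = readl(regs + CTRL_REG);
 *	writel(ctrl | CTRL_ENABLE, regs + CTRL_REG);
 */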
#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */
/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
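/*
 * Usage sketch (illustrative; "regs" and COUNTER_REG are hypothetical).
 * The relaxed variants are typically used in hot paths where the access
 * does not need to be ordered against other memory accesses, e.g. when
 * sampling a free-running counter register:
 *
 *	u32 now = readl_relaxed(regs + COUNTER_REG);
 */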
#ifndef readb_relaxed
#define readb_relaxed readb
#endif

#ifndef readw_relaxed
#define readw_relaxed readw
#endif

#ifndef readl_relaxed
#define readl_relaxed readl
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb
#endif

#ifndef writew_relaxed
#define writew_relaxed writew
#endif

#ifndef writel_relaxed
#define writel_relaxed writel
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
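/*
 * Usage sketch (illustrative; "regs", FIFO_REG and the buffer are
 * hypothetical).  The string accessors hit the same register repeatedly,
 * which is the usual way to drain or fill a device FIFO:
 *
 *	u32 buf[16];
 *
 *	readsl(regs + FIFO_REG, buf, ARRAY_SIZE(buf));
 *	writesl(regs + FIFO_REG, buf, ARRAY_SIZE(buf));
 */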
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
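/*
 * Usage sketch (illustrative; STATUS_PORT, CMD_PORT and CMD_RESET are
 * placeholder names).  Port I/O takes small integer port numbers rather
 * than ioremap()ed pointers:
 *
 *	u8 status = inb(STATUS_PORT);
 *	outb(CMD_RESET, CMD_PORT);
 */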
#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
	return readb(PCI_IOBASE + addr);
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
	return readw(PCI_IOBASE + addr);
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
	return readl(PCI_IOBASE + addr);
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
	writeb(value, PCI_IOBASE + addr);
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
	writew(value, PCI_IOBASE + addr);
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
	writel(value, PCI_IOBASE + addr);
}
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */
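/*
 * Usage sketch (illustrative; DATA_PORT and the buffer are hypothetical
 * placeholders).  A block of data can be transferred word by word through
 * a single data port:
 *
 *	u16 buf[256];
 *
 *	insw(DATA_PORT, buf, ARRAY_SIZE(buf));
 *	outsw(DATA_PORT, buf, ARRAY_SIZE(buf));
 */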
#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
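/*
 * Usage sketch (illustrative).  A directly mapped kernel virtual address,
 * e.g. one returned by kmalloc(), can be converted to the physical address
 * and back again:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *	void *same = phys_to_virt(phys);
 */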
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * If you have an MMU, your architecture is expected to implement both
 * ioremap() and iounmap(); otherwise the asm-generic helpers below provide
 * a direct mapping.
 *
 * There are also ioremap_*() variants. Without an MMU they all default to
 * the direct mapping, and you can override these defaults. With an MMU you
 * are strongly encouraged to provide your own implementations, as there is
 * currently no safe architecture-agnostic default. To avoid improper
 * behaviour, the default asm-generic ioremap_*() variants all return NULL
 * when an MMU is available. If you define your own ioremap_*() variant, you
 * must also define the corresponding macro to itself so that the NULL
 * default is not used.
 */
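/*
 * Usage sketch (illustrative; DEV_PHYS_BASE, DEV_REG_SIZE and DEV_CTRL_REG
 * are hypothetical).  A typical driver maps its register block once, uses
 * the MMIO accessors on the returned cookie, and unmaps it on teardown:
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, DEV_REG_SIZE);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + DEV_CTRL_REG);
 *	iounmap(regs);
 */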
#ifdef CONFIG_MMU
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
#else /* !CONFIG_MMU */
/*
 * On no-MMU systems a physical address can be used as the I/O cookie
 * directly, so ioremap() is just a cast.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */
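/*
 * Illustrative consequence of the identity mapping below (the physical base
 * used here is arbitrary): the cookie returned by ioremap() is numerically
 * the physical address itself, so driver code written against
 * ioremap()/iounmap() works unchanged on no-MMU systems:
 *
 *	void __iomem *regs = ioremap(0x40000000, 0x1000);
 *	(regs compares equal to (void __iomem *)0x40000000 here)
 */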
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
				      unsigned long flags)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_nocache
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */