/* pci-dma.c: Dynamic DMA mapping support for the FRV CPUs that have MMUs
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
  19. static void *frv_dma_alloc(struct device *hwdev, size_t size,
  20. dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
  21. {
  22. void *ret;
  23. ret = consistent_alloc(gfp, size, dma_handle);
  24. if (ret)
  25. memset(ret, 0, size);
  26. return ret;
  27. }
/* Free a coherent DMA buffer previously obtained from frv_dma_alloc().
 *
 * consistent_free() needs only the kernel virtual address; the size,
 * bus address and attrs arguments are ignored on this platform.
 */
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	consistent_free(vaddr);
}
/* Map a scatterlist for DMA.
 *
 * There is no IOMMU on this CPU, so the pages are used in place: each
 * page's dcache contents are written back to memory so the device sees
 * up-to-date data, and the entry count is returned unchanged.
 */
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	unsigned long dampr2;
	void *vaddr;
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	/* Save DAMPR2 so it can be restored after the kmaps below;
	 * kmap_atomic_primary() presumably claims this mapping register —
	 * TODO confirm against the FRV highmem implementation. */
	dampr2 = __get_DAMPR(2);

	for_each_sg(sglist, sg, nents, i) {
		vaddr = kmap_atomic_primary(sg_page(sg));
		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
	}

	/* NOTE(review): a single unmap after the loop relies on every
	 * iteration reusing the same primary kmap slot; also, if nents
	 * were 0, vaddr would be used uninitialized here — callers appear
	 * to guarantee nents >= 1.  Verify both assumptions. */
	kunmap_atomic_primary(vaddr);

	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
	}

	return nents;
}
  55. static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
  56. unsigned long offset, size_t size,
  57. enum dma_data_direction direction, unsigned long attrs)
  58. {
  59. flush_dcache_page(page);
  60. return (dma_addr_t) page_to_phys(page) + offset;
  61. }
/* Make a single mapping's data visible to the device before it reads:
 * draining the write buffers is sufficient on this platform.
 */
static void frv_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}
/* Make a scatterlist's data visible to the device before it reads;
 * same as the single-mapping case — just drain the write buffers.
 */
static void frv_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}
  74. static int frv_dma_supported(struct device *dev, u64 mask)
  75. {
  76. /*
  77. * we fall back to GFP_DMA when the mask isn't all 1s,
  78. * so we can't guarantee allocations that must be
  79. * within a tighter range than GFP_DMA..
  80. */
  81. if (mask < 0x00ffffff)
  82. return 0;
  83. return 1;
  84. }
/* DMA mapping operations for FRV, consumed by the generic DMA-mapping
 * layer.  Hooks not listed here (unmap_page, unmap_sg, the *_for_cpu
 * syncs, etc.) are left NULL — presumably the core treats absent hooks
 * as no-ops on this architecture; confirm against the dma-mapping core.
 */
struct dma_map_ops frv_dma_ops = {
	.alloc			= frv_dma_alloc,
	.free			= frv_dma_free,
	.map_page		= frv_dma_map_page,
	.map_sg			= frv_dma_map_sg,
	.sync_single_for_device	= frv_dma_sync_single_for_device,
	.sync_sg_for_device	= frv_dma_sync_sg_for_device,
	.dma_supported		= frv_dma_supported,
};
EXPORT_SYMBOL(frv_dma_ops);