dma-alloc.c

/* MN10300 Dynamic DMA mapping support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Derived from: arch/i386/kernel/pci-dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <asm/io.h>

static unsigned long pci_sram_allocated = 0xbc000000;

static void *mn10300_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long addr;
	void *ret;

	pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
		 dev ? dev_name(dev) : "?", size, gfp);

	if (0xbe000000 - pci_sram_allocated >= size) {
		size = (size + 255) & ~255;
		addr = pci_sram_allocated;
		pci_sram_allocated += size;
		ret = (void *) addr;
		goto done;
	}

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	addr = __get_free_pages(gfp, get_order(size));
	if (!addr)
		return NULL;

	/* map the coherent memory through the uncached memory window */
	ret = (void *) (addr | 0x20000000);

	/* fill the memory with obvious rubbish */
	memset((void *) addr, 0xfb, size);

	/* write back and evict all cache lines covering this region */
	mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);

done:
	*dma_handle = virt_to_bus((void *) addr);
	printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
	return ret;
}
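
/*
 * Illustrative sketch, not part of the original file: drivers do not call
 * mn10300_dma_alloc()/mn10300_dma_free() directly; they use the generic DMA
 * API, which dispatches through mn10300_dma_ops defined below.  The helper
 * name "example_alloc_ring" and the 4096-byte buffer size are assumptions
 * made purely for illustration (a real caller would also need
 * <linux/dma-mapping.h>).
 *
 *	static int example_alloc_ring(struct device *dev)
 *	{
 *		dma_addr_t handle;
 *		void *ring;
 *
 *		ring = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
 *		if (!ring)
 *			return -ENOMEM;
 *
 *		... program the device with "handle" ...
 *
 *		dma_free_coherent(dev, 4096, ring, handle);
 *		return 0;
 *	}
 */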

static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr & ~0x20000000;

	if (addr >= 0x9c000000)
		return;

	free_pages(addr, get_order(size));
}

static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	mn10300_dcache_flush_inv();
	return nents;
}

static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	return page_to_bus(page) + offset;
}

static void mn10300_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static void mn10300_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static int mn10300_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

struct dma_map_ops mn10300_dma_ops = {
	.alloc			= mn10300_dma_alloc,
	.free			= mn10300_dma_free,
	.map_page		= mn10300_dma_map_page,
	.map_sg			= mn10300_dma_map_sg,
	.sync_single_for_device	= mn10300_dma_sync_single_for_device,
	.sync_sg_for_device	= mn10300_dma_sync_sg_for_device,
	/* hook up the helper above so the mask check is actually used */
	.dma_supported		= mn10300_dma_supported,
};
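
/*
 * Illustrative sketch, not part of the original file: streaming mappings
 * reach mn10300_dma_map_sg() and mn10300_dma_sync_sg_for_device() through
 * the generic scatter-gather DMA API.  The names "sgl" and "nents" and the
 * DMA_TO_DEVICE direction are assumptions made purely for illustration.
 *
 *	int count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *
 *	... CPU fills the buffers, then pushes them out of the cache ...
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	... device consumes the buffers ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */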