adf_memblock.c

/*
 * Copyright (C) 2013 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

/* Private data: physical base address of the exported memblock region. */
struct adf_memblock_pdata {
	phys_addr_t base;
};
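
/*
 * The exported buffer is a single physically contiguous region, so the
 * scatterlist built below always contains exactly one entry covering the
 * whole buffer.
 */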
static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
		enum dma_data_direction direction)
{
	struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
	unsigned long pfn = PFN_DOWN(pdata->base);
	struct page *page = pfn_to_page(pfn);
	struct sg_table *table;
	int nents, ret;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret < 0)
		goto err_alloc;

	sg_set_page(table->sgl, page, attach->dmabuf->size, 0);

	nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
	if (!nents) {
		ret = -EINVAL;
		goto err_map;
	}

	return table;

err_map:
	sg_free_table(table);
err_alloc:
	kfree(table);
	return ERR_PTR(ret);
}

static void adf_memblock_unmap(struct dma_buf_attachment *attach,
		struct sg_table *table, enum dma_data_direction direction)
{
	dma_unmap_sg(attach->dev, table->sgl, 1, direction);
	sg_free_table(table);
}

static void __init_memblock adf_memblock_release(struct dma_buf *buf)
{
	struct adf_memblock_pdata *pdata = buf->priv;
	int err = memblock_free(pdata->base, buf->size);

	if (err < 0)
		pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
	kfree(pdata);
}

static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
		bool atomic)
{
	struct adf_memblock_pdata *pdata = buf->priv;
	unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
	struct page *page = pfn_to_page(pfn);

	if (atomic)
		return kmap_atomic(page);
	else
		return kmap(page);
}

static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
		unsigned long pgoffset)
{
	return adf_memblock_do_kmap(buf, pgoffset, true);
}

static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
		unsigned long pgoffset, void *vaddr)
{
	kunmap_atomic(vaddr);
}

static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
{
	return adf_memblock_do_kmap(buf, pgoffset, false);
}

static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
		void *vaddr)
{
	struct adf_memblock_pdata *pdata = buf->priv;
	unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;

	/* kunmap() takes the mapped page, not the kernel virtual address. */
	kunmap(pfn_to_page(pfn));
}

static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct adf_memblock_pdata *pdata = buf->priv;

	return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

struct dma_buf_ops adf_memblock_ops = {
	.map_dma_buf = adf_memblock_map,
	.unmap_dma_buf = adf_memblock_unmap,
	.release = adf_memblock_release,
	.kmap_atomic = adf_memblock_kmap_atomic,
	.kunmap_atomic = adf_memblock_kunmap_atomic,
	.kmap = adf_memblock_kmap,
	.kunmap = adf_memblock_kunmap,
	.mmap = adf_memblock_mmap,
};

/**
 * adf_memblock_export - export a memblock reserved area as a dma-buf
 *
 * @base: base physical address
 * @size: memblock size
 * @flags: mode flags for the dma-buf's file
 *
 * @base and @size must be page-aligned.
 *
 * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
 */
struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
{
	struct adf_memblock_pdata *pdata;
	struct dma_buf *buf;

	if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
		return ERR_PTR(-EINVAL);

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags, NULL);
	if (IS_ERR(buf))
		kfree(pdata);

	return buf;
}
EXPORT_SYMBOL(adf_memblock_export);
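
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * platform might carve out a contiguous region with memblock_reserve()
 * early in boot and later hand it to userspace as a dma-buf.  The
 * adf_fb_* names, the base address, and the size below are hypothetical.
 */
#if 0
#include <linux/fcntl.h>
#include <linux/sizes.h>

static phys_addr_t adf_fb_base = 0x80000000;	/* hypothetical carveout base */
static const size_t adf_fb_size = SZ_8M;	/* hypothetical size */

void __init adf_fb_reserve(void)
{
	/* Must run while memblock is still active, e.g. from machine init. */
	memblock_reserve(adf_fb_base, adf_fb_size);
}

static struct dma_buf *adf_fb_export(void)
{
	/* @flags becomes the mode of the dma-buf's backing file. */
	return adf_memblock_export(adf_fb_base, adf_fb_size, O_RDWR);
}
#endif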